author     Mark Brown <broonie@kernel.org>  2022-08-04 17:33:35 +0100
committer  Mark Brown <broonie@kernel.org>  2022-08-04 17:33:35 +0100
commit     b9abdb8c69244339bbf79ec2276955195d7f3c3e (patch)
tree       15813bf728b512cd3829387af0d3fd9e320450f5
parent     833d5816401b1aa6f8b082bb21893396a93420ce (diff)
parent     2630f18f3b58406287e8fe4ca6528c226098c9b2 (diff)
Merge branch 'rust-next' of https://github.com/Rust-for-Linux/linux.git
# Conflicts:
#	Makefile
-rw-r--r--  .gitignore | 6
-rw-r--r--  .rustfmt.toml | 12
-rw-r--r--  Documentation/core-api/printk-formats.rst | 10
-rw-r--r--  Documentation/doc-guide/kernel-doc.rst | 3
-rw-r--r--  Documentation/index.rst | 1
-rw-r--r--  Documentation/kbuild/kbuild.rst | 17
-rw-r--r--  Documentation/kbuild/makefiles.rst | 50
-rw-r--r--  Documentation/process/changes.rst | 41
-rw-r--r--  Documentation/rust/arch-support.rst | 23
-rw-r--r--  Documentation/rust/coding-guidelines.rst | 216
-rw-r--r--  Documentation/rust/general-information.rst | 79
-rw-r--r--  Documentation/rust/index.rst | 22
-rw-r--r--  Documentation/rust/quick-start.rst | 232
-rw-r--r--  MAINTAINERS | 15
-rw-r--r--  Makefile | 171
-rw-r--r--  arch/Kconfig | 6
-rw-r--r--  arch/arm/Kconfig | 1
-rw-r--r--  arch/arm64/Kconfig | 1
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/riscv/Kconfig | 1
-rw-r--r--  arch/riscv/Makefile | 5
-rw-r--r--  arch/um/Kconfig | 1
-rw-r--r--  arch/x86/Kconfig | 1
-rw-r--r--  arch/x86/Makefile | 10
-rw-r--r--  drivers/android/Kconfig | 6
-rw-r--r--  drivers/android/Makefile | 2
-rw-r--r--  drivers/android/allocation.rs | 266
-rw-r--r--  drivers/android/context.rs | 80
-rw-r--r--  drivers/android/defs.rs | 99
-rw-r--r--  drivers/android/node.rs | 476
-rw-r--r--  drivers/android/process.rs | 961
-rw-r--r--  drivers/android/range_alloc.rs | 189
-rw-r--r--  drivers/android/rust_binder.rs | 106
-rw-r--r--  drivers/android/thread.rs | 871
-rw-r--r--  drivers/android/transaction.rs | 326
-rw-r--r--  drivers/gpio/Kconfig | 8
-rw-r--r--  drivers/gpio/Makefile | 1
-rw-r--r--  drivers/gpio/gpio_pl061_rust.rs | 367
-rw-r--r--  include/linux/compiler_types.h | 6
-rw-r--r--  include/linux/kallsyms.h | 2
-rw-r--r--  include/linux/spinlock.h | 17
-rw-r--r--  include/linux/workqueue.h | 17
-rw-r--r--  include/uapi/linux/android/binder.h | 30
-rw-r--r--  init/Kconfig | 52
-rw-r--r--  kernel/configs/rust.config | 1
-rw-r--r--  kernel/kallsyms.c | 26
-rw-r--r--  kernel/livepatch/core.c | 4
-rw-r--r--  lib/Kconfig.debug | 82
-rw-r--r--  lib/vsprintf.c | 13
-rw-r--r--  rust/.gitignore | 10
-rw-r--r--  rust/Makefile | 415
-rw-r--r--  rust/alloc/README.md | 33
-rw-r--r--  rust/alloc/alloc.rs | 440
-rw-r--r--  rust/alloc/borrow.rs | 498
-rw-r--r--  rust/alloc/boxed.rs | 2026
-rw-r--r--  rust/alloc/boxed/thin.rs | 219
-rw-r--r--  rust/alloc/collections/mod.rs | 156
-rw-r--r--  rust/alloc/ffi/c_str.rs | 1203
-rw-r--r--  rust/alloc/ffi/mod.rs | 93
-rw-r--r--  rust/alloc/fmt.rs | 614
-rw-r--r--  rust/alloc/lib.rs | 239
-rw-r--r--  rust/alloc/macros.rs | 128
-rw-r--r--  rust/alloc/raw_vec.rs | 567
-rw-r--r--  rust/alloc/slice.rs | 1295
-rw-r--r--  rust/alloc/str.rs | 641
-rw-r--r--  rust/alloc/string.rs | 2944
-rw-r--r--  rust/alloc/vec/drain.rs | 186
-rw-r--r--  rust/alloc/vec/drain_filter.rs | 145
-rw-r--r--  rust/alloc/vec/into_iter.rs | 365
-rw-r--r--  rust/alloc/vec/is_zero.rs | 120
-rw-r--r--  rust/alloc/vec/mod.rs | 3420
-rw-r--r--  rust/alloc/vec/partial_eq.rs | 49
-rw-r--r--  rust/alloc/vec/set_len_on_drop.rs | 30
-rw-r--r--  rust/alloc/vec/spec_extend.rs | 174
-rw-r--r--  rust/bindgen_parameters | 21
-rw-r--r--  rust/bindings/bindings_helper.h | 49
-rw-r--r--  rust/bindings/lib.rs | 57
-rw-r--r--  rust/build_error.rs | 29
-rw-r--r--  rust/compiler_builtins.rs | 79
-rw-r--r--  rust/exports.c | 21
-rw-r--r--  rust/helpers.c | 679
-rw-r--r--  rust/kernel/allocator.rs | 64
-rw-r--r--  rust/kernel/amba.rs | 261
-rw-r--r--  rust/kernel/build_assert.rs | 83
-rw-r--r--  rust/kernel/chrdev.rs | 206
-rw-r--r--  rust/kernel/clk.rs | 79
-rw-r--r--  rust/kernel/cred.rs | 46
-rw-r--r--  rust/kernel/delay.rs | 104
-rw-r--r--  rust/kernel/device.rs | 527
-rw-r--r--  rust/kernel/driver.rs | 442
-rw-r--r--  rust/kernel/error.rs | 564
-rw-r--r--  rust/kernel/file.rs | 887
-rw-r--r--  rust/kernel/fs.rs | 846
-rw-r--r--  rust/kernel/fs/param.rs | 553
-rw-r--r--  rust/kernel/gpio.rs | 505
-rw-r--r--  rust/kernel/hwrng.rs | 210
-rw-r--r--  rust/kernel/io_buffer.rs | 153
-rw-r--r--  rust/kernel/io_mem.rs | 278
-rw-r--r--  rust/kernel/iov_iter.rs | 81
-rw-r--r--  rust/kernel/irq.rs | 681
-rw-r--r--  rust/kernel/kasync.rs | 50
-rw-r--r--  rust/kernel/kasync/executor.rs | 154
-rw-r--r--  rust/kernel/kasync/executor/workqueue.rs | 291
-rw-r--r--  rust/kernel/kasync/net.rs | 322
-rw-r--r--  rust/kernel/kunit.rs | 91
-rw-r--r--  rust/kernel/lib.rs | 267
-rw-r--r--  rust/kernel/linked_list.rs | 247
-rw-r--r--  rust/kernel/miscdev.rs | 290
-rw-r--r--  rust/kernel/mm.rs | 149
-rw-r--r--  rust/kernel/module_param.rs | 499
-rw-r--r--  rust/kernel/net.rs | 392
-rw-r--r--  rust/kernel/net/filter.rs | 447
-rw-r--r--  rust/kernel/of.rs | 63
-rw-r--r--  rust/kernel/pages.rs | 144
-rw-r--r--  rust/kernel/platform.rs | 223
-rw-r--r--  rust/kernel/power.rs | 118
-rw-r--r--  rust/kernel/prelude.rs | 36
-rw-r--r--  rust/kernel/print.rs | 406
-rw-r--r--  rust/kernel/random.rs | 42
-rw-r--r--  rust/kernel/raw_list.rs | 361
-rw-r--r--  rust/kernel/rbtree.rs | 563
-rw-r--r--  rust/kernel/revocable.rs | 425
-rw-r--r--  rust/kernel/security.rs | 38
-rw-r--r--  rust/kernel/static_assert.rs | 34
-rw-r--r--  rust/kernel/std_vendor.rs | 161
-rw-r--r--  rust/kernel/str.rs | 597
-rw-r--r--  rust/kernel/sync.rs | 169
-rw-r--r--  rust/kernel/sync/arc.rs | 582
-rw-r--r--  rust/kernel/sync/condvar.rs | 140
-rw-r--r--  rust/kernel/sync/guard.rs | 159
-rw-r--r--  rust/kernel/sync/locked_by.rs | 111
-rw-r--r--  rust/kernel/sync/mutex.rs | 149
-rw-r--r--  rust/kernel/sync/nowait.rs | 188
-rw-r--r--  rust/kernel/sync/rcu.rs | 52
-rw-r--r--  rust/kernel/sync/revocable.rs | 246
-rw-r--r--  rust/kernel/sync/rwsem.rs | 196
-rw-r--r--  rust/kernel/sync/seqlock.rs | 201
-rw-r--r--  rust/kernel/sync/smutex.rs | 290
-rw-r--r--  rust/kernel/sync/spinlock.rs | 357
-rw-r--r--  rust/kernel/sysctl.rs | 199
-rw-r--r--  rust/kernel/task.rs | 239
-rw-r--r--  rust/kernel/types.rs | 705
-rw-r--r--  rust/kernel/unsafe_list.rs | 680
-rw-r--r--  rust/kernel/user_ptr.rs | 175
-rw-r--r--  rust/kernel/workqueue.rs | 512
-rw-r--r--  rust/macros/concat_idents.rs | 23
-rw-r--r--  rust/macros/helpers.rs | 79
-rw-r--r--  rust/macros/lib.rs | 191
-rw-r--r--  rust/macros/module.rs | 655
-rw-r--r--  rust/macros/vtable.rs | 95
-rw-r--r--  samples/Kconfig | 2
-rw-r--r--  samples/Makefile | 1
-rw-r--r--  samples/rust/Kconfig | 165
-rw-r--r--  samples/rust/Makefile | 19
-rw-r--r--  samples/rust/hostprogs/.gitignore | 3
-rw-r--r--  samples/rust/hostprogs/Makefile | 5
-rw-r--r--  samples/rust/hostprogs/a.rs | 7
-rw-r--r--  samples/rust/hostprogs/b.rs | 5
-rw-r--r--  samples/rust/hostprogs/single.rs | 12
-rw-r--r--  samples/rust/rust_chrdev.rs | 49
-rw-r--r--  samples/rust/rust_echo_server.rs | 60
-rw-r--r--  samples/rust/rust_fs.rs | 59
-rw-r--r--  samples/rust/rust_minimal.rs | 35
-rw-r--r--  samples/rust/rust_miscdev.rs | 142
-rw-r--r--  samples/rust/rust_module_parameters.rs | 69
-rw-r--r--  samples/rust/rust_netfilter.rs | 54
-rw-r--r--  samples/rust/rust_platform.rs | 22
-rw-r--r--  samples/rust/rust_print.rs | 54
-rw-r--r--  samples/rust/rust_random.rs | 60
-rw-r--r--  samples/rust/rust_selftests.rs | 99
-rw-r--r--  samples/rust/rust_semaphore.rs | 170
-rw-r--r--  samples/rust/rust_semaphore_c.c | 212
-rw-r--r--  samples/rust/rust_stack_probing.rs | 36
-rw-r--r--  samples/rust/rust_sync.rs | 93
-rw-r--r--  scripts/.gitignore | 1
-rw-r--r--  scripts/Kconfig.include | 6
-rw-r--r--  scripts/Makefile | 3
-rw-r--r--  scripts/Makefile.build | 60
-rw-r--r--  scripts/Makefile.debug | 10
-rw-r--r--  scripts/Makefile.host | 34
-rw-r--r--  scripts/Makefile.lib | 12
-rw-r--r--  scripts/Makefile.modfinal | 8
-rwxr-xr-x  scripts/cc-version.sh | 12
-rwxr-xr-x  scripts/checkpatch.pl | 12
-rwxr-xr-x  scripts/decode_stacktrace.sh | 14
-rwxr-xr-x  scripts/generate_rust_analyzer.py | 141
-rw-r--r--  scripts/generate_rust_target.rs | 232
-rwxr-xr-x  scripts/is_rust_module.sh | 16
-rw-r--r--  scripts/kallsyms.c | 47
-rw-r--r--  scripts/kconfig/confdata.c | 75
-rwxr-xr-x  scripts/min-tool-version.sh | 6
-rw-r--r--  scripts/rust-is-available-bindgen-libclang.h | 2
-rwxr-xr-x  scripts/rust-is-available.sh | 160
-rwxr-xr-x  scripts/rustdoc_test_builder.py | 59
-rwxr-xr-x  scripts/rustdoc_test_gen.py | 164
-rw-r--r--  tools/include/linux/kallsyms.h | 2
-rw-r--r--  tools/lib/perf/include/perf/event.h | 2
-rw-r--r--  tools/lib/symbol/kallsyms.h | 2
198 files changed, 43691 insertions, 73 deletions
diff --git a/.gitignore b/.gitignore
index 265959544978..5da004814678 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,6 +37,8 @@
*.o
*.o.*
*.patch
+*.rmeta
+*.rsi
*.s
*.so
*.so.dbg
@@ -97,6 +99,7 @@ modules.order
!.gitattributes
!.gitignore
!.mailmap
+!.rustfmt.toml
#
# Generated include files
@@ -162,3 +165,6 @@ x509.genkey
# Documentation toolchain
sphinx_*/
+
+# Rust analyzer configuration
+/rust-project.json
diff --git a/.rustfmt.toml b/.rustfmt.toml
new file mode 100644
index 000000000000..3de5cc497465
--- /dev/null
+++ b/.rustfmt.toml
@@ -0,0 +1,12 @@
+edition = "2021"
+newline_style = "Unix"
+
+# Unstable options that help catch some mistakes in formatting and that we may want to enable
+# when they become stable.
+#
+# They are kept here since they are useful to run from time to time.
+#format_code_in_doc_comments = true
+#reorder_impl_items = true
+#comment_width = 100
+#wrap_comments = true
+#normalize_comments = true
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 5e89497ba314..dbe1aacc79d0 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -625,6 +625,16 @@ Examples::
%p4cc Y10 little-endian (0x20303159)
%p4cc NV12 big-endian (0xb231564e)
+Rust
+----
+
+::
+
+ %pA
+
+Only intended to be used from Rust code to format ``core::fmt::Arguments``.
+Do *not* use it from C.
+
Thanks
======
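
On the Rust side, ``%pA`` is never written by hand: the kernel's printing
macros emit it and pass a pointer to a ``core::fmt::Arguments`` value down to
``_printk()``. A minimal sketch of the Rust usage that ends up going through
``%pA`` (the function name is illustrative; the macros come from the
``kernel`` crate added by this series)::

    use kernel::prelude::*;

    fn report_status(status: i32) {
        // `pr_info!` builds a `core::fmt::Arguments` with `format_args!` and
        // hands it to the C printk machinery, which formats it via `%pA`.
        pr_info!("device status: {}\n", status);
    }
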
diff --git a/Documentation/doc-guide/kernel-doc.rst b/Documentation/doc-guide/kernel-doc.rst
index 9c779bd7a751..1dcbd7332476 100644
--- a/Documentation/doc-guide/kernel-doc.rst
+++ b/Documentation/doc-guide/kernel-doc.rst
@@ -14,6 +14,9 @@ when it is embedded in source files.
reasons. The kernel source contains tens of thousands of kernel-doc
comments. Please stick to the style described here.
+.. note:: kernel-doc does not cover Rust code: please see
+ Documentation/rust/general-information.rst instead.
+
The kernel-doc structure is extracted from the comments, and proper
`Sphinx C Domain`_ function and type descriptions with anchors are
generated from them. The descriptions are filtered for special kernel-doc
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 67036a05b771..35d90903242a 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -82,6 +82,7 @@ merged much easier.
maintainer/index
fault-injection/index
livepatch/index
+ rust/index
Kernel API documentation
diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
index ef19b9c13523..08f575e6236c 100644
--- a/Documentation/kbuild/kbuild.rst
+++ b/Documentation/kbuild/kbuild.rst
@@ -48,6 +48,10 @@ KCFLAGS
-------
Additional options to the C compiler (for built-in and modules).
+KRUSTFLAGS
+----------
+Additional options to the Rust compiler (for built-in and modules).
+
CFLAGS_KERNEL
-------------
Additional options for $(CC) when used to compile
@@ -57,6 +61,15 @@ CFLAGS_MODULE
-------------
Additional module specific options to use for $(CC).
+RUSTFLAGS_KERNEL
+----------------
+Additional options for $(RUSTC) when used to compile
+code that is compiled as built-in.
+
+RUSTFLAGS_MODULE
+----------------
+Additional module specific options to use for $(RUSTC).
+
LDFLAGS_MODULE
--------------
Additional options used for $(LD) when linking modules.
@@ -69,6 +82,10 @@ HOSTCXXFLAGS
------------
Additional flags to be passed to $(HOSTCXX) when building host programs.
+HOSTRUSTFLAGS
+-------------
+Additional flags to be passed to $(HOSTRUSTC) when building host programs.
+
HOSTLDFLAGS
-----------
Additional flags to be passed when linking host programs.
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index 11a296e52d68..5ea1e72d89c8 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -29,8 +29,9 @@ This document describes the Linux kernel Makefiles.
--- 4.1 Simple Host Program
--- 4.2 Composite Host Programs
--- 4.3 Using C++ for host programs
- --- 4.4 Controlling compiler options for host programs
- --- 4.5 When host programs are actually built
+ --- 4.4 Using Rust for host programs
+ --- 4.5 Controlling compiler options for host programs
+ --- 4.6 When host programs are actually built
=== 5 Userspace Program support
--- 5.1 Simple Userspace Program
@@ -835,7 +836,24 @@ Both possibilities are described in the following.
qconf-cxxobjs := qconf.o
qconf-objs := check.o
-4.4 Controlling compiler options for host programs
+4.4 Using Rust for host programs
+--------------------------------
+
+ Kbuild offers support for host programs written in Rust. However,
+ since a Rust toolchain is not mandatory for kernel compilation,
+ it may only be used in scenarios where Rust is required to be
+ available (e.g. when ``CONFIG_RUST`` is enabled).
+
+ Example::
+
+ hostprogs := target
+ target-rust := y
+
+ Kbuild will compile ``target`` using ``target.rs`` as the crate root,
+ located in the same directory as the ``Makefile``. The crate may
+ consist of several source files (see ``samples/rust/hostprogs``).
+
+4.5 Controlling compiler options for host programs
--------------------------------------------------
When compiling host programs, it is possible to set specific flags.
@@ -867,7 +885,7 @@ Both possibilities are described in the following.
When linking qconf, it will be passed the extra option
"-L$(QTDIR)/lib".
-4.5 When host programs are actually built
+4.6 When host programs are actually built
-----------------------------------------
Kbuild will only build host-programs when they are referenced
@@ -1181,6 +1199,17 @@ When kbuild executes, the following steps are followed (roughly):
The first example utilises the trick that a config option expands
to 'y' when selected.
+ KBUILD_RUSTFLAGS
+ $(RUSTC) compiler flags
+
+ Default value - see top level Makefile
+ Append or modify as required per architecture.
+
+ Often, the KBUILD_RUSTFLAGS variable depends on the configuration.
+
+ Note that target specification file generation (for ``--target``)
+ is handled in ``scripts/generate_rust_target.rs``.
+
KBUILD_AFLAGS_KERNEL
Assembler options specific for built-in
@@ -1208,6 +1237,19 @@ When kbuild executes, the following steps are followed (roughly):
are used for $(CC).
From commandline CFLAGS_MODULE shall be used (see kbuild.rst).
+ KBUILD_RUSTFLAGS_KERNEL
+ $(RUSTC) options specific for built-in
+
+ $(KBUILD_RUSTFLAGS_KERNEL) contains extra Rust compiler flags used to
+ compile resident kernel code.
+
+ KBUILD_RUSTFLAGS_MODULE
+ Options for $(RUSTC) when building modules
+
+ $(KBUILD_RUSTFLAGS_MODULE) is used to add arch-specific options that
+ are used for $(RUSTC).
+ From commandline RUSTFLAGS_MODULE shall be used (see kbuild.rst).
+
KBUILD_LDFLAGS_MODULE
Options for $(LD) when linking modules
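
To complement the host program example in section 4.4 above: the crate root
named by ``hostprogs`` is an ordinary Rust binary. A minimal sketch of what a
``target.rs`` crate root might contain (the file name follows the hypothetical
``target`` example above; ``samples/rust/hostprogs`` has the real samples)::

    // target.rs - crate root for the `target` host program.
    fn main() {
        println!("hello from a Rust host program");
    }
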
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index 19c286c23786..9a90197989dd 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -31,6 +31,8 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
GNU C 5.1 gcc --version
Clang/LLVM (optional) 11.0.0 clang --version
+Rust (optional) 1.62.0 rustc --version
+bindgen (optional) 0.56.0 bindgen --version
GNU make 3.81 make --version
bash 4.2 bash --version
binutils 2.23 ld -v
@@ -80,6 +82,29 @@ kernels. Older releases aren't guaranteed to work, and we may drop workarounds
from the kernel that were used to support older versions. Please see additional
docs on :ref:`Building Linux with Clang/LLVM <kbuild_llvm>`.
+Rust (optional)
+---------------
+
+A particular version of the Rust toolchain is required. Newer versions may or
+may not work because the kernel depends on some unstable Rust features, for
+the moment.
+
+Each Rust toolchain comes with several "components", some of which are required
+(like ``rustc``) and some that are optional. The ``rust-src`` component (which
+is optional) needs to be installed to build the kernel. Other components are
+useful for developing.
+
+Please see Documentation/rust/quick-start.rst for instructions on how to
+satisfy the build requirements of Rust support. In particular, the ``Makefile``
+target ``rustavailable`` is useful to check why the Rust toolchain may not
+be detected.
+
+bindgen (optional)
+------------------
+
+``bindgen`` is used to generate the Rust bindings to the C side of the kernel.
+It depends on ``libclang``.
+
Make
----
@@ -348,6 +373,12 @@ Sphinx
Please see :ref:`sphinx_install` in :ref:`Documentation/doc-guide/sphinx.rst <sphinxdoc>`
for details about Sphinx requirements.
+rustdoc
+-------
+
+``rustdoc`` is used to generate the documentation for Rust code. Please see
+Documentation/rust/general-information.rst for more information.
+
Getting updated software
========================
@@ -364,6 +395,16 @@ Clang/LLVM
- :ref:`Getting LLVM <getting_llvm>`.
+Rust
+----
+
+- Documentation/rust/quick-start.rst.
+
+bindgen
+-------
+
+- Documentation/rust/quick-start.rst.
+
Make
----
diff --git a/Documentation/rust/arch-support.rst b/Documentation/rust/arch-support.rst
new file mode 100644
index 000000000000..bcc92cae11d9
--- /dev/null
+++ b/Documentation/rust/arch-support.rst
@@ -0,0 +1,23 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Arch Support
+============
+
+Currently, the Rust compiler (``rustc``) uses LLVM for code generation,
+which limits the supported architectures that can be targeted. In addition,
+support for building the kernel with LLVM/Clang varies (please see
+Documentation/kbuild/llvm.rst). This support is needed for ``bindgen``
+which uses ``libclang``.
+
+Below is a general summary of architectures that currently work. Level of
+support corresponds to ``S`` values in the ``MAINTAINERS`` file.
+
+============ ================ ==============================================
+Architecture Level of support Constraints
+============ ================ ==============================================
+``arm`` Maintained ``armv6`` and compatible only.
+``arm64`` Maintained None.
+``powerpc`` Maintained ``ppc64le`` only.
+``riscv`` Maintained ``riscv64`` only.
+``x86`` Maintained ``x86_64`` only.
+============ ================ ==============================================
diff --git a/Documentation/rust/coding-guidelines.rst b/Documentation/rust/coding-guidelines.rst
new file mode 100644
index 000000000000..aa8ed082613e
--- /dev/null
+++ b/Documentation/rust/coding-guidelines.rst
@@ -0,0 +1,216 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Coding Guidelines
+=================
+
+This document describes how to write Rust code in the kernel.
+
+
+Style & formatting
+------------------
+
+The code should be formatted using ``rustfmt``. In this way, a person
+contributing from time to time to the kernel does not need to learn and
+remember one more style guide. More importantly, reviewers and maintainers
+do not need to spend time pointing out style issues anymore, and thus
+fewer patch roundtrips may be needed to land a change.
+
+.. note:: Conventions on comments and documentation are not checked by
+ ``rustfmt``. Thus those still need to be taken care of.
+
+The default settings of ``rustfmt`` are used. This means the idiomatic Rust
+style is followed. For instance, 4 spaces are used for indentation rather
+than tabs.
+
+It is convenient to instruct editors/IDEs to format while typing,
+when saving or at commit time. However, if for some reason reformatting
+the entire kernel Rust sources is needed at some point, the following can be
+run::
+
+ make LLVM=1 rustfmt
+
+It is also possible to check if everything is formatted (printing a diff
+otherwise), for instance for a CI, with::
+
+ make LLVM=1 rustfmtcheck
+
+Like ``clang-format`` for the rest of the kernel, ``rustfmt`` works on
+individual files, and does not require a kernel configuration. Sometimes it may
+even work with broken code.
+
+
+Comments
+--------
+
+"Normal" comments (i.e. ``//``, rather than code documentation which starts
+with ``///`` or ``//!``) are written in Markdown the same way as documentation
+comments are, even though they will not be rendered. This improves consistency,
+simplifies the rules and allows content to be moved between the two kinds of
+comments more easily. For instance:
+
+.. code-block:: rust
+
+ // `object` is ready to be handled now.
+ f(object);
+
+Furthermore, just like documentation, comments are capitalized at the beginning
+of a sentence and ended with a period (even if it is a single sentence). This
+includes ``// SAFETY:``, ``// TODO:`` and other "tagged" comments, e.g.:
+
+.. code-block:: rust
+
+ // FIXME: The error should be handled properly.
+
+Comments should not be used for documentation purposes: comments are intended
+for implementation details, not users. This distinction is useful even if the
+reader of the source file is both an implementor and a user of an API. In fact,
+sometimes it is useful to use both comments and documentation at the same time.
+For instance, for a ``TODO`` list or to comment on the documentation itself.
+For the latter case, comments can be inserted in the middle; that is, closer to
+the line of documentation to be commented. For any other case, comments are
+written after the documentation, e.g.:
+
+.. code-block:: rust
+
+ /// Returns a new [`Foo`].
+ ///
+ /// # Examples
+ ///
+ // TODO: Find a better example.
+ /// ```
+ /// let foo = f(42);
+ /// ```
+ // FIXME: Use fallible approach.
+ pub fn f(x: i32) -> Foo {
+ // ...
+ }
+
+A special kind of comment is the ``// SAFETY:`` comment. Such comments must appear
+before every ``unsafe`` block, and they explain why the code inside the block is
+correct/sound, i.e. why it cannot trigger undefined behavior in any case, e.g.:
+
+.. code-block:: rust
+
+ // SAFETY: `p` is valid by the safety requirements.
+ unsafe { *p = 0; }
+
+``// SAFETY:`` comments are not to be confused with the ``# Safety`` sections
+in code documentation. ``# Safety`` sections specify the contract that callers
+(for functions) or implementors (for traits) need to abide by. ``// SAFETY:``
+comments show why a call (for functions) or implementation (for traits) actually
+respects the preconditions stated in a ``# Safety`` section or the language
+reference.
+
+
+Code documentation
+------------------
+
+Rust kernel code is not documented like C kernel code (i.e. via kernel-doc).
+Instead, the usual system for documenting Rust code is used: the ``rustdoc``
+tool, which uses Markdown (a lightweight markup language).
+
+To learn Markdown, there are many guides available out there. For instance,
+the one at:
+
+ https://commonmark.org/help/
+
+This is how a well-documented Rust function may look:
+
+.. code-block:: rust
+
+ /// Returns the contained [`Some`] value, consuming the `self` value,
+ /// without checking that the value is not [`None`].
+ ///
+ /// # Safety
+ ///
+ /// Calling this method on [`None`] is *[undefined behavior]*.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Some("air");
+ /// assert_eq!(unsafe { x.unwrap_unchecked() }, "air");
+ /// ```
+ pub unsafe fn unwrap_unchecked(self) -> T {
+ match self {
+ Some(val) => val,
+
+ // SAFETY: The safety contract must be upheld by the caller.
+ None => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+This example showcases a few ``rustdoc`` features and some conventions followed
+in the kernel:
+
+ - The first paragraph must be a single sentence briefly describing what
+ the documented item does. Further explanations must go in extra paragraphs.
+
+ - Unsafe functions must document their safety preconditions under
+ a ``# Safety`` section.
+
+ - While not shown here, if a function may panic, the conditions under which
+ that happens must be described under a ``# Panics`` section.
+
+ Please note that panicking should be very rare and used only with a good
+ reason. In almost all cases, a fallible approach should be used, typically
+ returning a ``Result``.
+
+ - If providing examples of usage would help readers, they must be written in
+ a section called ``# Examples``.
+
+ - Rust items (functions, types, constants...) must be linked appropriately
+ (``rustdoc`` will create a link automatically).
+
+ - Any ``unsafe`` block must be preceded by a ``// SAFETY:`` comment
+ describing why the code inside is sound.
+
+ While sometimes the reason might look trivial and therefore unneeded,
+ writing these comments is not just a good way of documenting what has been
+ taken into account, but most importantly, it provides a way to know that
+ there are no *extra* implicit constraints.
+
+To learn more about how to write documentation for Rust and extra features,
+please take a look at the ``rustdoc`` book at:
+
+ https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html
+
+
+Naming
+------
+
+Rust kernel code follows the usual Rust naming conventions:
+
+ https://rust-lang.github.io/api-guidelines/naming.html
+
+When existing C concepts (e.g. macros, functions, objects...) are wrapped into
+a Rust abstraction, a name as close as reasonably possible to the C side should
+be used in order to avoid confusion and to improve readability when switching
+back and forth between the C and Rust sides. For instance, macros such as
+``pr_info`` from C are named the same in the Rust side.
+
+Having said that, casing should be adjusted to follow the Rust naming
+conventions, and namespacing introduced by modules and types should not be
+repeated in the item names. For instance, when wrapping constants like:
+
+.. code-block:: c
+
+ #define GPIO_LINE_DIRECTION_IN 0
+ #define GPIO_LINE_DIRECTION_OUT 1
+
+The equivalent in Rust may look like (ignoring documentation):
+
+.. code-block:: rust
+
+ pub mod gpio {
+ pub enum LineDirection {
+ In = bindings::GPIO_LINE_DIRECTION_IN as _,
+ Out = bindings::GPIO_LINE_DIRECTION_OUT as _,
+ }
+ }
+
+That is, the equivalent of ``GPIO_LINE_DIRECTION_IN`` would be referred to as
+``gpio::LineDirection::In``. In particular, it should not be named
+``gpio::gpio_line_direction::GPIO_LINE_DIRECTION_IN``.
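
A small usage sketch of the wrapped constants above, assuming the
``gpio::LineDirection`` enum from the previous block (the surrounding function
is illustrative)::

    fn describe(direction: gpio::LineDirection) -> &'static str {
        match direction {
            gpio::LineDirection::In => "input",
            gpio::LineDirection::Out => "output",
        }
    }
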
diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst
new file mode 100644
index 000000000000..49029ee82e55
--- /dev/null
+++ b/Documentation/rust/general-information.rst
@@ -0,0 +1,79 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+General Information
+===================
+
+This document contains useful information to know when working with
+the Rust support in the kernel.
+
+
+Code documentation
+------------------
+
+Rust kernel code is documented using ``rustdoc``, its built-in documentation
+generator.
+
+The generated HTML docs include integrated search, linked items (e.g. types,
+functions, constants), source code, etc. They may be read at (TODO: link when
+in mainline and generated alongside the rest of the documentation):
+
+ http://kernel.org/
+
+The docs can also be easily generated and read locally. This is quite fast
+(same order as compiling the code itself) and no special tools or environment
+are needed. This has the added advantage that they will be tailored to
+the particular kernel configuration used. To generate them, use the ``rustdoc``
+target with the same invocation used for compilation, e.g.::
+
+ make LLVM=1 rustdoc
+
+To read the docs locally in your web browser, run e.g.::
+
+ xdg-open rust/doc/kernel/index.html
+
+To learn about how to write the documentation, please see coding-guidelines.rst.
+
+
+Extra lints
+-----------
+
+While ``rustc`` is a very helpful compiler, some extra lints and analyses are
+available via ``clippy``, a Rust linter. To enable it, pass ``CLIPPY=1`` to
+the same invocation used for compilation, e.g.::
+
+ make LLVM=1 CLIPPY=1
+
+Please note that Clippy may change code generation, thus it should not be
+enabled while building a production kernel.
+
+
+Abstractions vs. bindings
+-------------------------
+
+Abstractions are Rust code wrapping kernel functionality from the C side.
+
+In order to use functions and types from the C side, bindings are created.
+Bindings are the declarations for Rust of those functions and types from
+the C side.
+
+For instance, one may write a ``Mutex`` abstraction in Rust which wraps
+a ``struct mutex`` from the C side and calls its functions through the bindings.
+
+Abstractions are not available for all the kernel internal APIs and concepts,
+but it is intended that coverage is expanded as time goes on. "Leaf" modules
+(e.g. drivers) should not use the C bindings directly. Instead, subsystems
+should provide as-safe-as-possible abstractions as needed.
+
+
+Conditional compilation
+-----------------------
+
+Rust code has access to conditional compilation based on the kernel
+configuration:
+
+.. code-block:: rust
+
+ #[cfg(CONFIG_X)] // Enabled (`y` or `m`)
+ #[cfg(CONFIG_X="y")] // Enabled as a built-in (`y`)
+ #[cfg(CONFIG_X="m")] // Enabled as a module (`m`)
+ #[cfg(not(CONFIG_X))] // Disabled
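
A short sketch of how these predicates attach to items (the function and the
``CONFIG_EXAMPLE`` symbol are illustrative, not taken from the tree)::

    // Compiled only when CONFIG_EXAMPLE is enabled (`y` or `m`).
    #[cfg(CONFIG_EXAMPLE)]
    fn setup() {
        // ...
    }

    // Fallback compiled when CONFIG_EXAMPLE is disabled.
    #[cfg(not(CONFIG_EXAMPLE))]
    fn setup() {}
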
diff --git a/Documentation/rust/index.rst b/Documentation/rust/index.rst
new file mode 100644
index 000000000000..4ae8c66b94fa
--- /dev/null
+++ b/Documentation/rust/index.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Rust
+====
+
+Documentation related to Rust within the kernel. To start using Rust
+in the kernel, please read the quick-start.rst guide.
+
+.. toctree::
+ :maxdepth: 1
+
+ quick-start
+ general-information
+ coding-guidelines
+ arch-support
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst
new file mode 100644
index 000000000000..13b7744b1e27
--- /dev/null
+++ b/Documentation/rust/quick-start.rst
@@ -0,0 +1,232 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+Quick Start
+===========
+
+This document describes how to get started with kernel development in Rust.
+
+
+Requirements: Building
+----------------------
+
+This section explains how to fetch the tools needed for building.
+
+Some of these requirements might be available from Linux distributions
+under names like ``rustc``, ``rust-src``, ``rust-bindgen``, etc. However,
+at the time of writing, they are likely not to be recent enough unless
+the distribution tracks the latest releases.
+
+To easily check whether the requirements are met, the following target
+can be used::
+
+ make LLVM=1 rustavailable
+
+This triggers the same logic used by Kconfig to determine whether
+``RUST_IS_AVAILABLE`` should be enabled; if it is not, it also explains
+why.
+
+
+rustc
+*****
+
+A particular version of the Rust compiler is required. Newer versions may or
+may not work because, for the moment, the kernel depends on some unstable
+Rust features.
+
+If ``rustup`` is being used, enter the checked out source code directory
+and run::
+
+ rustup override set $(scripts/min-tool-version.sh rustc)
+
+Otherwise, fetch a standalone installer or install ``rustup`` from:
+
+ https://www.rust-lang.org
+
+
+Rust standard library source
+****************************
+
+The Rust standard library source is required because the build system will
+cross-compile ``core`` and ``alloc``.
+
+If ``rustup`` is being used, run::
+
+ rustup component add rust-src
+
+The components are installed per toolchain, thus upgrading the Rust compiler
+version later on requires re-adding the component.
+
+Otherwise, if a standalone installer is used, the Rust repository may be cloned
+into the installation folder of the toolchain::
+
+ git clone --recurse-submodules \
+ --branch $(scripts/min-tool-version.sh rustc) \
+ https://github.com/rust-lang/rust \
+ $(rustc --print sysroot)/lib/rustlib/src/rust
+
+In this case, upgrading the Rust compiler version later on requires manually
+updating this clone.
+
+
+libclang
+********
+
+``libclang`` (part of LLVM) is used by ``bindgen`` to understand the C code
+in the kernel, which means LLVM needs to be installed, as when the kernel
+is compiled with ``CC=clang`` or ``LLVM=1``.
+
+Linux distributions are likely to have a suitable one available, so it is
+best to check that first.
+
+There are also some binaries for several systems and architectures uploaded at:
+
+ https://releases.llvm.org/download.html
+
+Otherwise, building LLVM takes quite a while, but it is not a complex process:
+
+ https://llvm.org/docs/GettingStarted.html#getting-the-source-code-and-building-llvm
+
+Please see Documentation/kbuild/llvm.rst for more information and further ways
+to fetch pre-built releases and distribution packages.
+
+
+bindgen
+*******
+
+The bindings to the C side of the kernel are generated at build time using
+the ``bindgen`` tool. A particular version is required.
+
+Install it via (note that this will download and build the tool from source)::
+
+ cargo install --locked --version $(scripts/min-tool-version.sh bindgen) bindgen
+
+
+Requirements: Developing
+------------------------
+
+This section explains how to fetch the tools needed for developing. That is,
+they are not needed when just building the kernel.
+
+
+rustfmt
+*******
+
+The ``rustfmt`` tool is used to automatically format all the Rust kernel code,
+including the generated C bindings (for details, please see
+coding-guidelines.rst).
+
+If ``rustup`` is being used, its ``default`` profile already installs the tool,
+thus nothing needs to be done. If another profile is being used, the component
+can be installed manually::
+
+ rustup component add rustfmt
+
+The standalone installers also come with ``rustfmt``.
+
+
+clippy
+******
+
+``clippy`` is a Rust linter. Running it provides extra warnings for Rust code.
+It can be run by passing ``CLIPPY=1`` to ``make`` (for details, please see
+general-information.rst).
+
+If ``rustup`` is being used, its ``default`` profile already installs the tool,
+thus nothing needs to be done. If another profile is being used, the component
+can be installed manually::
+
+ rustup component add clippy
+
+The standalone installers also come with ``clippy``.
+
+
+cargo
+*****
+
+``cargo`` is the Rust native build system. It is currently required to run
+the tests since it is used to build a custom standard library that contains
+the facilities provided by the custom ``alloc`` in the kernel. The tests can
+be run using the ``rusttest`` Make target.
+
+If ``rustup`` is being used, all the profiles already install the tool,
+thus nothing needs to be done.
+
+The standalone installers also come with ``cargo``.
+
+
+rustdoc
+*******
+
+``rustdoc`` is the documentation tool for Rust. It generates pretty HTML
+documentation for Rust code (for details, please see
+general-information.rst).
+
+``rustdoc`` is also used to test the examples provided in documented Rust code
+(called doctests or documentation tests). The ``rusttest`` Make target uses
+this feature.
+
+If ``rustup`` is being used, all the profiles already install the tool,
+thus nothing needs to be done.
+
+The standalone installers also come with ``rustdoc``.
+
+
+rust-analyzer
+*************
+
+The `rust-analyzer <https://rust-analyzer.github.io/>`_ language server can
+be used with many editors to enable syntax highlighting, completion, go to
+definition, and other features.
+
+``rust-analyzer`` needs a configuration file, ``rust-project.json``, which
+can be generated by the ``rust-analyzer`` Make target.
+
+
+Configuration
+-------------
+
+``Rust support`` (``CONFIG_RUST``) needs to be enabled in the ``General setup``
+menu. The option is only shown if a suitable Rust toolchain is found (see
+above), as long as the other requirements are met. In turn, this will make
+visible the rest of the options that depend on Rust.
+
+Afterwards, go to::
+
+ Kernel hacking
+ -> Sample kernel code
+ -> Rust samples
+
+And enable some sample modules either as built-in or as loadable.
+
+
+Building
+--------
+
+Building a kernel with a complete LLVM toolchain is the best supported setup
+at the moment. That is::
+
+ make LLVM=1
+
+For architectures that do not support a full LLVM toolchain, use::
+
+ make CC=clang
+
+Using GCC also works for some configurations, but it is very experimental at
+the moment.
+
+
+Hacking
+-------
+
+To dive deeper, take a look at the source code of the samples
+at ``samples/rust/``, the Rust support code under ``rust/`` and
+the ``Rust hacking`` menu under ``Kernel hacking``.
+
+If GDB/Binutils is used and Rust symbols are not getting demangled, the reason
+is that the toolchain does not support Rust's new v0 mangling scheme yet.
+There are a few ways out:
+
+ - Install a newer release (GDB >= 10.2, Binutils >= 2.36).
+
+ - Some versions of GDB (e.g. vanilla GDB 10.1) are able to use
+ the pre-demangled names embedded in the debug info (``CONFIG_DEBUG_INFO``).
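
For a feel of what the samples mentioned above look like, a Rust kernel module
in this series is declared with the ``module!`` macro plus a type implementing
the kernel module trait. The following is only a rough sketch in the spirit of
``samples/rust/rust_minimal.rs``; field spellings and the exact ``init``
signature should be checked against the sample itself::

    use kernel::prelude::*;

    module! {
        type: RustMinimal,
        name: b"rust_minimal",
        author: b"Rust for Linux Contributors",
        description: b"Rust minimal sample",
        license: b"GPL v2",
    }

    struct RustMinimal;

    impl kernel::Module for RustMinimal {
        fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
            // Log from the module initialiser; unloading would run `Drop` if implemented.
            pr_info!("Rust minimal sample (init)\n");
            Ok(RustMinimal)
        }
    }
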
diff --git a/MAINTAINERS b/MAINTAINERS
index b8696cc2211e..c5ea1aa99b96 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17727,6 +17727,21 @@ L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/ulp/rtrs/
+RUST
+M: Miguel Ojeda <ojeda@kernel.org>
+M: Alex Gaynor <alex.gaynor@gmail.com>
+M: Wedson Almeida Filho <wedsonaf@google.com>
+L: rust-for-linux@vger.kernel.org
+S: Supported
+W: https://github.com/Rust-for-Linux/linux
+B: https://github.com/Rust-for-Linux/linux/issues
+T: git https://github.com/Rust-for-Linux/linux.git rust-next
+F: Documentation/rust/
+F: rust/
+F: samples/rust/
+F: scripts/*rust*
+K: \b(?i:rust)\b
+
RXRPC SOCKETS (AF_RXRPC)
M: David Howells <dhowells@redhat.com>
M: Marc Dionne <marc.dionne@auristor.com>
diff --git a/Makefile b/Makefile
index 60a7e3e8b478..36d69eb967e4 100644
--- a/Makefile
+++ b/Makefile
@@ -120,6 +120,15 @@ endif
export KBUILD_CHECKSRC
+# Enable "clippy" (a linter) as part of the Rust compilation.
+#
+# Use 'make CLIPPY=1' to enable it.
+ifeq ("$(origin CLIPPY)", "command line")
+ KBUILD_CLIPPY := $(CLIPPY)
+endif
+
+export KBUILD_CLIPPY
+
# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the
# directory of external module to build. Setting M= takes precedence.
ifeq ("$(origin M)", "command line")
@@ -270,14 +279,14 @@ no-dot-config-targets := $(clean-targets) \
cscope gtags TAGS tags help% %docs check% coccicheck \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
- outputmakefile
+ outputmakefile rustavailable rustfmt rustfmtcheck
# Installation targets should not require compiler. Unfortunately, vdso_install
# is an exception where build artifacts may be updated. This must be fixed.
no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
headers_install modules_install kernelrelease image_name
no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
image_name
-single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
+single-targets := %.a %.i %.rsi %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
config-build :=
mixed-build :=
@@ -439,6 +448,7 @@ else
HOSTCC = gcc
HOSTCXX = g++
endif
+HOSTRUSTC = rustc
HOSTPKG_CONFIG = pkg-config
KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
@@ -447,8 +457,26 @@ KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
KBUILD_USERCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
KBUILD_USERLDFLAGS := $(USERLDFLAGS)
+# These flags apply to all Rust code in the tree, including the kernel and
+# host programs.
+export rust_common_flags := --edition=2021 \
+ -Zbinary_dep_depinfo=y \
+ -Dunsafe_op_in_unsafe_fn -Drust_2018_idioms \
+ -Dunreachable_pub -Dnon_ascii_idents \
+ -Wmissing_docs \
+ -Drustdoc::missing_crate_level_docs \
+ -Dclippy::correctness -Dclippy::style \
+ -Dclippy::suspicious -Dclippy::complexity \
+ -Dclippy::perf \
+ -Dclippy::let_unit_value -Dclippy::mut_mut \
+ -Dclippy::needless_bitwise_bool \
+ -Dclippy::needless_continue \
+ -Wclippy::dbg_macro
+
KBUILD_HOSTCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
KBUILD_HOSTCXXFLAGS := -Wall -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
+KBUILD_HOSTRUSTFLAGS := $(rust_common_flags) -O -Cstrip=debuginfo \
+ -Zallow-features= $(HOSTRUSTFLAGS)
KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
@@ -473,6 +501,12 @@ OBJDUMP = $(CROSS_COMPILE)objdump
READELF = $(CROSS_COMPILE)readelf
STRIP = $(CROSS_COMPILE)strip
endif
+RUSTC = rustc
+RUSTDOC = rustdoc
+RUSTFMT = rustfmt
+CLIPPY_DRIVER = clippy-driver
+BINDGEN = bindgen
+CARGO = cargo
PAHOLE = pahole
RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids
LEX = flex
@@ -498,9 +532,11 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
NOSTDINC_FLAGS :=
CFLAGS_MODULE =
+RUSTFLAGS_MODULE =
AFLAGS_MODULE =
LDFLAGS_MODULE =
CFLAGS_KERNEL =
+RUSTFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
@@ -529,15 +565,42 @@ KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
-Werror=return-type -Wno-format-security \
-std=gnu11
KBUILD_CPPFLAGS := -D__KERNEL__
+KBUILD_RUSTFLAGS := $(rust_common_flags) \
+ --target=$(objtree)/rust/target.json \
+ -Cpanic=abort -Cembed-bitcode=n -Clto=n \
+ -Cforce-unwind-tables=n -Ccodegen-units=1 \
+ -Csymbol-mangling-version=v0 \
+ -Crelocation-model=static \
+ -Zfunction-sections=n \
+ -Dclippy::float_arithmetic
+
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
+KBUILD_RUSTFLAGS_KERNEL :=
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
+KBUILD_RUSTFLAGS_MODULE := --cfg MODULE
KBUILD_LDFLAGS_MODULE :=
KBUILD_LDFLAGS :=
CLANG_FLAGS :=
+ifeq ($(KBUILD_CLIPPY),1)
+ RUSTC_OR_CLIPPY_QUIET := CLIPPY
+ RUSTC_OR_CLIPPY = $(CLIPPY_DRIVER)
+else
+ RUSTC_OR_CLIPPY_QUIET := RUSTC
+ RUSTC_OR_CLIPPY = $(RUSTC)
+endif
+
+ifdef RUST_LIB_SRC
+ export RUST_LIB_SRC
+endif
+
+export RUSTC_BOOTSTRAP := 1
+
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC HOSTPKG_CONFIG
+export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN CARGO
+export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
@@ -546,9 +609,10 @@ export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export KBUILD_RUSTFLAGS RUSTFLAGS_KERNEL RUSTFLAGS_MODULE
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
-export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
-export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
+export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_RUSTFLAGS_MODULE KBUILD_LDFLAGS_MODULE
+export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL KBUILD_RUSTFLAGS_KERNEL
export PAHOLE_FLAGS
# Files to ignore in find ... statements
@@ -729,7 +793,7 @@ $(KCONFIG_CONFIG):
#
# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig,
# so you cannot notice that Kconfig is waiting for the user input.
-%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
+%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h %/generated/rustc_cfg: $(KCONFIG_CONFIG)
$(Q)$(kecho) " SYNC $@"
$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
else # !may-sync-config
@@ -758,10 +822,17 @@ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
KBUILD_CFLAGS += -O2
+KBUILD_RUSTFLAGS += -Copt-level=2
else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
+KBUILD_RUSTFLAGS += -Copt-level=s
endif
+# Always set `debug-assertions` and `overflow-checks` because their default
+# depends on `opt-level` and `debug-assertions`, respectively.
+KBUILD_RUSTFLAGS += -Cdebug-assertions=$(if $(CONFIG_RUST_DEBUG_ASSERTIONS),y,n)
+KBUILD_RUSTFLAGS += -Coverflow-checks=$(if $(CONFIG_RUST_OVERFLOW_CHECKS),y,n)
+
# Tell gcc to never replace conditional load with a non-conditional one
ifdef CONFIG_CC_IS_GCC
# gcc-10 renamed --param=allow-store-data-races=0 to
@@ -792,6 +863,9 @@ KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
+KBUILD_RUSTFLAGS-$(CONFIG_WERROR) += -Dwarnings
+KBUILD_RUSTFLAGS += $(KBUILD_RUSTFLAGS-y)
+
ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += -Qunused-arguments
# The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
@@ -812,12 +886,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
+KBUILD_RUSTFLAGS += -Cforce-frame-pointers=y
else
# Some targets (ARM with Thumb2, for example), can't be built with frame
# pointers. For those, we don't have FUNCTION_TRACER automatically
# select FRAME_POINTER. However, FUNCTION_TRACER adds -pg, and this is
# incompatible with -fomit-frame-pointer with current GCC, so we don't use
# -fomit-frame-pointer with FUNCTION_TRACER.
+# In the Rust target specification, "frame-pointer" is set explicitly
+# to "may-omit".
ifndef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -fomit-frame-pointer
endif
@@ -882,8 +959,10 @@ ifdef CONFIG_DEBUG_SECTION_MISMATCH
KBUILD_CFLAGS += -fno-inline-functions-called-once
endif
+# `rustc`'s `-Zfunction-sections` applies to data too (as of 1.59.0).
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+KBUILD_RUSTFLAGS_KERNEL += -Zfunction-sections=y
LDFLAGS_vmlinux += --gc-sections
endif
@@ -1026,10 +1105,11 @@ include $(addprefix $(srctree)/, $(include-y))
# Do not add $(call cc-option,...) below this line. When you build the kernel
# from the clean source tree, the GCC plugins do not exist at this point.
-# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
+# Add user supplied CPPFLAGS, AFLAGS, CFLAGS and RUSTFLAGS as the last assignments
KBUILD_CPPFLAGS += $(KCPPFLAGS)
KBUILD_AFLAGS += $(KAFLAGS)
KBUILD_CFLAGS += $(KCFLAGS)
+KBUILD_RUSTFLAGS += $(KRUSTFLAGS)
KBUILD_LDFLAGS_MODULE += --build-id=sha1
LDFLAGS_vmlinux += --build-id=sha1
@@ -1099,6 +1179,7 @@ ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/
core-$(CONFIG_BLOCK) += block/
core-$(CONFIG_IO_URING) += io_uring/
+core-$(CONFIG_RUST) += rust/
vmlinux-dirs := $(patsubst %/,%,$(filter %/, \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -1203,6 +1284,10 @@ prepare0: archprepare
# All the preparing..
prepare: prepare0
+ifdef CONFIG_RUST
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust-is-available.sh -v
+ $(Q)$(MAKE) $(build)=rust
+endif
PHONY += remove-stale-files
remove-stale-files:
@@ -1497,7 +1582,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
- compile_commands.json .thinlto-cache
+ compile_commands.json .thinlto-cache rust/test rust/doc
# Directories & files removed with 'make mrproper'
MRPROPER_FILES += include/config include/generated \
@@ -1508,7 +1593,8 @@ MRPROPER_FILES += include/config include/generated \
certs/signing_key.pem \
certs/x509.genkey \
vmlinux-gdb.py \
- *.spec
+ *.spec \
+ rust/target.json rust/libmacros.so
# clean - Delete most, but leave enough to build external modules
#
@@ -1533,6 +1619,9 @@ $(mrproper-dirs):
mrproper: clean $(mrproper-dirs)
$(call cmd,rmfiles)
+ @find . $(RCS_FIND_IGNORE) \
+ \( -name '*.rmeta' \) \
+ -type f -print | xargs rm -f
# distclean
#
@@ -1620,6 +1709,24 @@ help:
@echo ' kselftest-merge - Merge all the config dependencies of'
@echo ' kselftest to existing .config.'
@echo ''
+ @echo 'Rust targets:'
+ @echo ' rustavailable - Checks whether the Rust toolchain is'
+ @echo ' available and, if not, explains why.'
+ @echo ' rustfmt - Reformat all the Rust code in the kernel'
+ @echo ' rustfmtcheck - Checks if all the Rust code in the kernel'
+ @echo ' is formatted, printing a diff otherwise.'
+ @echo ' rustdoc - Generate Rust documentation'
+ @echo ' (requires kernel .config)'
+ @echo ' rusttest - Runs the Rust tests'
+ @echo ' (requires kernel .config; downloads external repos)'
+ @echo ' rust-analyzer - Generate rust-project.json rust-analyzer support file'
+ @echo ' (requires kernel .config)'
+ @echo ' dir/file.[os] - Build specified target only'
+ @echo ' dir/file.rsi - Build macro expanded source, similar to C preprocessing.'
+ @echo ' Run with RUSTFMT=n to skip reformatting if needed.'
+ @echo ' The output is not intended to be compilable.'
+ @echo ' dir/file.ll - Build the LLVM assembly file'
+ @echo ''
@$(if $(dtstree), \
echo 'Devicetree:'; \
echo '* dtbs - Build device tree blobs for enabled boards'; \
@@ -1692,6 +1799,52 @@ PHONY += $(DOC_TARGETS)
$(DOC_TARGETS):
$(Q)$(MAKE) $(build)=Documentation $@
+
+# Rust targets
+# ---------------------------------------------------------------------------
+
+# "Is Rust available?" target
+PHONY += rustavailable
+rustavailable:
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust-is-available.sh -v && echo "Rust is available!"
+
+# Documentation target
+#
+# Using the singular to avoid running afoul of `no-dot-config-targets`.
+PHONY += rustdoc
+rustdoc: prepare
+ $(Q)$(MAKE) $(build)=rust $@
+
+# Testing target
+PHONY += rusttest
+rusttest: prepare
+ $(Q)$(MAKE) $(build)=rust $@
+
+# Formatting targets
+PHONY += rustfmt rustfmtcheck
+
+# We skip `rust/alloc` since we want to minimize the diff w.r.t. upstream.
+#
+# We match using absolute paths since `find` does not resolve them
+# when matching, which is a problem when e.g. `srctree` is `..`.
+# We `grep` afterwards in order to remove the directory entry itself.
+rustfmt:
+ $(Q)find $(abs_srctree) -type f -name '*.rs' \
+ -o -path $(abs_srctree)/rust/alloc -prune \
+ -o -path $(abs_objtree)/rust/test -prune \
+ | grep -Fv $(abs_srctree)/rust/alloc \
+ | grep -Fv $(abs_objtree)/rust/test \
+ | grep -Fv generated \
+ | xargs $(RUSTFMT) $(rustfmt_flags)
+
+rustfmtcheck: rustfmt_flags = --check
+rustfmtcheck: rustfmt
+
+# IDE support targets
+PHONY += rust-analyzer
+rust-analyzer:
+ $(Q)$(MAKE) $(build)=rust $@
+
# Misc
# ---------------------------------------------------------------------------
@@ -1859,7 +2012,7 @@ $(clean-dirs):
clean: $(clean-dirs)
$(call cmd,rmfiles)
@find $(or $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
- \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
+ \( -name '*.[aios]' -o -name '*.rsi' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '*.ko.*' \
-o -name '*.dtb' -o -name '*.dtbo' -o -name '*.dtb.S' -o -name '*.dt.yaml' \
-o -name '*.dwo' -o -name '*.lst' \
diff --git a/arch/Kconfig b/arch/Kconfig
index f330410da63a..f63e5b7f40f5 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -356,6 +356,12 @@ config HAVE_RSEQ
This symbol should be selected by an architecture if it
supports an implementation of restartable sequences.
+config HAVE_RUST
+ bool
+ help
+ This symbol should be selected by an architecture if it
+ supports Rust.
+
config HAVE_FUNCTION_ARG_ACCESS_API
bool
help
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 87badeae3181..c7fb8ef3b12d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -116,6 +116,7 @@ config ARM
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
+ select HAVE_RUST if CPU_32v6 || CPU_32v6K
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6bb50124a7e9..d2eb39b79b18 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -205,6 +205,7 @@ config ARM64
select HAVE_FUNCTION_ARG_ACCESS_API
select MMU_GATHER_RCU_TABLE_FREE
select HAVE_RSEQ
+ select HAVE_RUST
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index c3bb865f1f73..cf9a33d6c430 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -245,6 +245,7 @@ config PPC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
+ select HAVE_RUST if PPC64 && CPU_LITTLE_ENDIAN
select HAVE_SETUP_PER_CPU_AREA if PPC64
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 5c52a82e83a8..205c1e2f539c 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -104,6 +104,7 @@ config RISCV
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RUST if 64BIT
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 81029d40a672..24ebe042406e 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -26,6 +26,8 @@ ifeq ($(CONFIG_ARCH_RV64I),y)
KBUILD_CFLAGS += -mabi=lp64
KBUILD_AFLAGS += -mabi=lp64
+ KBUILD_RUSTFLAGS += -Ctarget-cpu=generic-rv64
+
KBUILD_LDFLAGS += -melf64lriscv
else
BITS := 32
@@ -33,6 +35,9 @@ else
KBUILD_CFLAGS += -mabi=ilp32
KBUILD_AFLAGS += -mabi=ilp32
+
+ KBUILD_RUSTFLAGS += -Ctarget-cpu=generic-rv32
+
KBUILD_LDFLAGS += -melf32lriscv
endif
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 78de31ac1da7..61f84b2b165e 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -27,6 +27,7 @@ config UML
select TRACE_IRQFLAGS_SUPPORT
select TTY # Needed for line.c
select HAVE_ARCH_VMAP_STACK
+ select HAVE_RUST if X86_64
config MMU
bool
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cca4c0ac39cc..74572c65d97f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -258,6 +258,7 @@ config X86
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
select HAVE_PREEMPT_DYNAMIC_CALL
select HAVE_RSEQ
+ select HAVE_RUST if X86_64
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UACCESS_VALIDATION if HAVE_OBJTOOL
select HAVE_UNSTABLE_SCHED_CLOCK
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 7854685c5f25..bab595003f07 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -68,6 +68,7 @@ export BITS
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
#
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
+KBUILD_RUSTFLAGS += -Ctarget-feature=-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-avx,-avx2
ifeq ($(CONFIG_X86_KERNEL_IBT),y)
#
@@ -155,8 +156,17 @@ else
cflags-$(CONFIG_GENERIC_CPU) += -mtune=generic
KBUILD_CFLAGS += $(cflags-y)
+ rustflags-$(CONFIG_MK8) += -Ctarget-cpu=k8
+ rustflags-$(CONFIG_MPSC) += -Ctarget-cpu=nocona
+ rustflags-$(CONFIG_MCORE2) += -Ctarget-cpu=core2
+ rustflags-$(CONFIG_MATOM) += -Ctarget-cpu=atom
+ rustflags-$(CONFIG_GENERIC_CPU) += -Ztune-cpu=generic
+ KBUILD_RUSTFLAGS += $(rustflags-y)
+
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel
+ KBUILD_RUSTFLAGS += -Cno-redzone=y
+ KBUILD_RUSTFLAGS += -Ccode-model=kernel
endif
#
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 07aa8ae0a058..37f7080807c5 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -13,6 +13,12 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_RUST
+ bool "Android Binder IPC Driver in Rust"
+ depends on MMU && RUST
+ help
+ Implementation of the Binder IPC in Rust.
+
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c9d3d0c99c25..c428f2ce2f05 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -4,3 +4,5 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
diff --git a/drivers/android/allocation.rs b/drivers/android/allocation.rs
new file mode 100644
index 000000000000..3ed7b649eeb7
--- /dev/null
+++ b/drivers/android/allocation.rs
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::mem::{replace, size_of, MaybeUninit};
+use kernel::{
+ bindings, linked_list::List, pages::Pages, prelude::*, sync::Ref, user_ptr::UserSlicePtrReader,
+};
+
+use crate::{
+ defs::*,
+ node::NodeRef,
+ process::{AllocationInfo, Process},
+ thread::{BinderError, BinderResult},
+ transaction::FileInfo,
+};
+
+pub(crate) struct Allocation<'a> {
+ pub(crate) offset: usize,
+ size: usize,
+ pub(crate) ptr: usize,
+ pages: Ref<[Pages<0>]>,
+ pub(crate) process: &'a Process,
+ allocation_info: Option<AllocationInfo>,
+ free_on_drop: bool,
+ file_list: List<Box<FileInfo>>,
+}
+
+impl<'a> Allocation<'a> {
+ pub(crate) fn new(
+ process: &'a Process,
+ offset: usize,
+ size: usize,
+ ptr: usize,
+ pages: Ref<[Pages<0>]>,
+ ) -> Self {
+ Self {
+ process,
+ offset,
+ size,
+ ptr,
+ pages,
+ allocation_info: None,
+ free_on_drop: true,
+ file_list: List::new(),
+ }
+ }
+
+ pub(crate) fn take_file_list(&mut self) -> List<Box<FileInfo>> {
+ replace(&mut self.file_list, List::new())
+ }
+
+ pub(crate) fn add_file_info(&mut self, file: Box<FileInfo>) {
+ self.file_list.push_back(file);
+ }
+
+ fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
+ where
+ T: FnMut(&Pages<0>, usize, usize) -> Result,
+ {
+ // Check that the request is within the buffer.
+ if offset.checked_add(size).ok_or(EINVAL)? > self.size {
+ return Err(EINVAL);
+ }
+ offset += self.offset;
+ let mut page_index = offset >> bindings::PAGE_SHIFT;
+ offset &= (1 << bindings::PAGE_SHIFT) - 1;
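+ // Walk the range one page at a time, passing each page and the portion of the request
+ // that falls within it to `cb`.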
+ while size > 0 {
+ let available = core::cmp::min(size, (1 << bindings::PAGE_SHIFT) as usize - offset);
+ cb(&self.pages[page_index], offset, available)?;
+ size -= available;
+ page_index += 1;
+ offset = 0;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSlicePtrReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ self.iterate(offset, size, |page, offset, to_copy| {
+ page.copy_into_page(reader, offset, to_copy)
+ })
+ }
+
+ pub(crate) fn read<T>(&self, offset: usize) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ let mut out_offset = 0;
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: Data buffer is allocated on the stack.
+ unsafe {
+ page.read(
+ (out.as_mut_ptr() as *mut u8).add(out_offset),
+ offset,
+ to_copy,
+ )
+ }?;
+ out_offset += to_copy;
+ Ok(())
+ })?;
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> Result {
+ let mut obj_offset = 0;
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = unsafe { (obj as *const T as *const u8).add(obj_offset) };
+ // SAFETY: We have a reference to the object, so the pointer is valid.
+ unsafe { page.write(obj_ptr, offset, to_copy) }?;
+ obj_offset += to_copy;
+ Ok(())
+ })
+ }
+
+ pub(crate) fn keep_alive(mut self) {
+ self.process
+ .buffer_make_freeable(self.offset, self.allocation_info.take());
+ self.free_on_drop = false;
+ }
+
+ pub(crate) fn set_info(&mut self, info: AllocationInfo) {
+ self.allocation_info = Some(info);
+ }
+}
+
+impl Drop for Allocation<'_> {
+ fn drop(&mut self) {
+ if !self.free_on_drop {
+ return;
+ }
+
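+ // If object offsets were recorded for this buffer, release the node and handle
+ // references embedded in it before freeing.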
+ if let Some(info) = &self.allocation_info {
+ let offsets = info.offsets.clone();
+ let view = AllocationView::new(self, offsets.start);
+ for i in offsets.step_by(size_of::<usize>()) {
+ if view.cleanup_object(i).is_err() {
+ pr_warn!("Error cleaning up object at offset {}\n", i)
+ }
+ }
+ }
+
+ self.process.buffer_raw_free(self.ptr);
+ }
+}
+
+pub(crate) struct AllocationView<'a, 'b> {
+ pub(crate) alloc: &'a mut Allocation<'b>,
+ limit: usize,
+}
+
+impl<'a, 'b> AllocationView<'a, 'b> {
+ pub(crate) fn new(alloc: &'a mut Allocation<'b>, limit: usize) -> Self {
+ AllocationView { alloc, limit }
+ }
+
+ pub(crate) fn read<T>(&self, offset: usize) -> Result<T> {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.read(offset)
+ }
+
+ pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> Result {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.write(offset, obj)
+ }
+
+ pub(crate) fn transfer_binder_object<T>(
+ &self,
+ offset: usize,
+ strong: bool,
+ get_node: T,
+ ) -> BinderResult
+ where
+ T: FnOnce(&bindings::flat_binder_object) -> BinderResult<NodeRef>,
+ {
+ // TODO: Do we want this function to take a &mut self?
+ let obj = self.read::<bindings::flat_binder_object>(offset)?;
+ let node_ref = get_node(&obj)?;
+
+ if core::ptr::eq(&*node_ref.node.owner, self.alloc.process) {
+ // The receiving process is the owner of the node, so send it a binder object (instead
+ // of a handle).
+ let (ptr, cookie) = node_ref.node.get_id();
+ let newobj = bindings::flat_binder_object {
+ hdr: bindings::binder_object_header {
+ type_: if strong {
+ BINDER_TYPE_BINDER
+ } else {
+ BINDER_TYPE_WEAK_BINDER
+ },
+ },
+ flags: obj.flags,
+ __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 { binder: ptr as _ },
+ cookie: cookie as _,
+ };
+ self.write(offset, &newobj)?;
+
+ // Increment the user ref count on the node. It will be decremented as part of the
+ // destruction of the buffer, when we see a binder or weak-binder object.
+ node_ref.node.update_refcount(true, strong);
+ } else {
+ // The receiving process is different from the owner, so we need to insert a handle to
+ // the binder object.
+ let handle = self
+ .alloc
+ .process
+ .insert_or_update_handle(node_ref, false)?;
+
+ let newobj = bindings::flat_binder_object {
+ hdr: bindings::binder_object_header {
+ type_: if strong {
+ BINDER_TYPE_HANDLE
+ } else {
+ BINDER_TYPE_WEAK_HANDLE
+ },
+ },
+ flags: obj.flags,
+ // TODO: To avoid padding, we write to `binder` instead of `handle` here. We need a
+ // better solution though.
+ __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 {
+ binder: handle as _,
+ },
+ ..bindings::flat_binder_object::default()
+ };
+ if self.write(offset, &newobj).is_err() {
+ // Decrement ref count on the handle we just created.
+ let _ = self.alloc.process.update_ref(handle, false, strong);
+ return Err(BinderError::new_failed());
+ }
+ }
+ Ok(())
+ }
+
+ fn cleanup_object(&self, index_offset: usize) -> Result {
+ let offset = self.alloc.read(index_offset)?;
+ let header = self.read::<bindings::binder_object_header>(offset)?;
+ // TODO: Handle other types.
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let obj = self.read::<bindings::flat_binder_object>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+ // populated.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as usize;
+ let cookie = obj.cookie as usize;
+ self.alloc.process.update_node(ptr, cookie, strong, false);
+ Ok(())
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let obj = self.read::<bindings::flat_binder_object>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+ // populated.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ self.alloc.process.update_ref(handle, false, strong)
+ }
+ _ => Ok(()),
+ }
+ }
+}
diff --git a/drivers/android/context.rs b/drivers/android/context.rs
new file mode 100644
index 000000000000..2bb448df6641
--- /dev/null
+++ b/drivers/android/context.rs
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use kernel::{
+ bindings,
+ prelude::*,
+ security,
+ sync::{Mutex, Ref, UniqueRef},
+};
+
+use crate::{
+ node::NodeRef,
+ thread::{BinderError, BinderResult},
+};
+
+struct Manager {
+ node: Option<NodeRef>,
+ uid: Option<bindings::kuid_t>,
+}
+
+pub(crate) struct Context {
+ manager: Mutex<Manager>,
+}
+
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for Context {}
+unsafe impl Sync for Context {}
+
+impl Context {
+ pub(crate) fn new() -> Result<Ref<Self>> {
+ let mut ctx = Pin::from(UniqueRef::try_new(Self {
+ // SAFETY: Init is called below.
+ manager: unsafe {
+ Mutex::new(Manager {
+ node: None,
+ uid: None,
+ })
+ },
+ })?);
+
+ // SAFETY: `manager` is also pinned when `ctx` is.
+ let manager = unsafe { ctx.as_mut().map_unchecked_mut(|c| &mut c.manager) };
+ kernel::mutex_init!(manager, "Context::manager");
+
+ Ok(ctx.into())
+ }
+
+ pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
+ let mut manager = self.manager.lock();
+ if manager.node.is_some() {
+ return Err(EBUSY);
+ }
+ security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
+
+ // TODO: Get the actual caller id.
+ let caller_uid = bindings::kuid_t::default();
+ if let Some(ref uid) = manager.uid {
+ if uid.val != caller_uid.val {
+ return Err(EPERM);
+ }
+ }
+
+ manager.node = Some(node_ref);
+ manager.uid = Some(caller_uid);
+ Ok(())
+ }
+
+ pub(crate) fn unset_manager_node(&self) {
+ let node_ref = self.manager.lock().node.take();
+ drop(node_ref);
+ }
+
+ pub(crate) fn get_manager_node(&self, strong: bool) -> BinderResult<NodeRef> {
+ self.manager
+ .lock()
+ .node
+ .as_ref()
+ .ok_or_else(BinderError::new_dead)?
+ .clone(strong)
+ }
+}
diff --git a/drivers/android/defs.rs b/drivers/android/defs.rs
new file mode 100644
index 000000000000..925e751a2564
--- /dev/null
+++ b/drivers/android/defs.rs
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::ops::{Deref, DerefMut};
+use kernel::{
+ bindings,
+ bindings::*,
+ io_buffer::{ReadableFromBytes, WritableToBytes},
+};
+
+macro_rules! pub_no_prefix {
+ ($prefix:ident, $($newname:ident),+) => {
+ $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+ };
+}
+
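+// For example, `BR_OK` below is defined as an alias for the bindgen-generated constant
+// `binder_driver_return_protocol_BR_OK`.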
+pub_no_prefix!(
+ binder_driver_return_protocol_,
+ BR_OK,
+ BR_ERROR,
+ BR_TRANSACTION,
+ BR_REPLY,
+ BR_DEAD_REPLY,
+ BR_TRANSACTION_COMPLETE,
+ BR_INCREFS,
+ BR_ACQUIRE,
+ BR_RELEASE,
+ BR_DECREFS,
+ BR_NOOP,
+ BR_SPAWN_LOOPER,
+ BR_DEAD_BINDER,
+ BR_CLEAR_DEATH_NOTIFICATION_DONE,
+ BR_FAILED_REPLY
+);
+
+pub_no_prefix!(
+ binder_driver_command_protocol_,
+ BC_TRANSACTION,
+ BC_REPLY,
+ BC_FREE_BUFFER,
+ BC_INCREFS,
+ BC_ACQUIRE,
+ BC_RELEASE,
+ BC_DECREFS,
+ BC_INCREFS_DONE,
+ BC_ACQUIRE_DONE,
+ BC_REGISTER_LOOPER,
+ BC_ENTER_LOOPER,
+ BC_EXIT_LOOPER,
+ BC_REQUEST_DEATH_NOTIFICATION,
+ BC_CLEAR_DEATH_NOTIFICATION,
+ BC_DEAD_BINDER_DONE
+);
+
+pub_no_prefix!(transaction_flags_, TF_ONE_WAY, TF_ACCEPT_FDS);
+
+pub(crate) use bindings::{
+ BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_HANDLE, BINDER_TYPE_WEAK_BINDER,
+ BINDER_TYPE_WEAK_HANDLE, FLAT_BINDER_FLAG_ACCEPTS_FDS,
+};
+
+macro_rules! decl_wrapper {
+ ($newname:ident, $wrapped:ty) => {
+ #[derive(Copy, Clone, Default)]
+ pub(crate) struct $newname($wrapped);
+
+ // TODO: This must be justified by inspecting the type, so should live outside the macro or
+ // the macro should be somehow marked unsafe.
+ unsafe impl ReadableFromBytes for $newname {}
+ unsafe impl WritableToBytes for $newname {}
+
+ impl Deref for $newname {
+ type Target = $wrapped;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+ }
+
+ impl DerefMut for $newname {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+ }
+ };
+}
+
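+// Each wrapper below is a newtype around a bindgen-generated struct: it derefs to the wrapped
+// type and can be read from and written to userspace I/O buffers as raw bytes.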
+decl_wrapper!(BinderNodeDebugInfo, bindings::binder_node_debug_info);
+decl_wrapper!(BinderNodeInfoForRef, bindings::binder_node_info_for_ref);
+decl_wrapper!(FlatBinderObject, bindings::flat_binder_object);
+decl_wrapper!(BinderTransactionData, bindings::binder_transaction_data);
+decl_wrapper!(BinderWriteRead, bindings::binder_write_read);
+decl_wrapper!(BinderVersion, bindings::binder_version);
+
+impl BinderVersion {
+ pub(crate) fn current() -> Self {
+ Self(bindings::binder_version {
+ protocol_version: bindings::BINDER_CURRENT_PROTOCOL_VERSION as _,
+ })
+ }
+}
diff --git a/drivers/android/node.rs b/drivers/android/node.rs
new file mode 100644
index 000000000000..1a46de1e736c
--- /dev/null
+++ b/drivers/android/node.rs
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::sync::atomic::{AtomicU64, Ordering};
+use kernel::{
+ io_buffer::IoBufferWriter,
+ linked_list::{GetLinks, Links, List},
+ prelude::*,
+ sync::{Guard, LockedBy, Mutex, Ref, SpinLock},
+ user_ptr::UserSlicePtrWriter,
+};
+
+use crate::{
+ defs::*,
+ process::{Process, ProcessInner},
+ thread::{BinderError, BinderResult, Thread},
+ DeliverToRead,
+};
+
+struct CountState {
+ count: usize,
+ has_count: bool,
+ is_biased: bool,
+}
+
+impl CountState {
+ fn new() -> Self {
+ Self {
+ count: 0,
+ has_count: false,
+ is_biased: false,
+ }
+ }
+
+ fn add_bias(&mut self) {
+ self.count += 1;
+ self.is_biased = true;
+ }
+}
+
+struct NodeInner {
+ strong: CountState,
+ weak: CountState,
+ death_list: List<Ref<NodeDeath>>,
+}
+
+struct NodeDeathInner {
+ dead: bool,
+ cleared: bool,
+ notification_done: bool,
+
+ /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
+ /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
+ /// to the user).
+ aborted: bool,
+}
+
+pub(crate) struct NodeDeath {
+ node: Ref<Node>,
+ process: Ref<Process>,
+ // TODO: Make this private.
+ pub(crate) cookie: usize,
+ work_links: Links<dyn DeliverToRead>,
+ // TODO: At the moment we're using this for two lists, which isn't safe because we want to
+ // remove from the list without knowing the list it's in. We need to separate this out.
+ death_links: Links<NodeDeath>,
+ inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+ /// Constructs a new node death notification object.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call `NodeDeath::init` before using the notification object.
+ pub(crate) unsafe fn new(node: Ref<Node>, process: Ref<Process>, cookie: usize) -> Self {
+ Self {
+ node,
+ process,
+ cookie,
+ work_links: Links::new(),
+ death_links: Links::new(),
+ inner: unsafe {
+ SpinLock::new(NodeDeathInner {
+ dead: false,
+ cleared: false,
+ notification_done: false,
+ aborted: false,
+ })
+ },
+ }
+ }
+
+ pub(crate) fn init(self: Pin<&mut Self>) {
+ // SAFETY: `inner` is pinned when `self` is.
+ let inner = unsafe { self.map_unchecked_mut(|n| &mut n.inner) };
+ kernel::spinlock_init!(inner, "NodeDeath::inner");
+ }
+
+ /// Sets the cleared flag to `true`.
+ ///
+ /// It removes `self` from the node's death notification list if needed. It must only be called
+ /// once.
+ ///
+ /// Returns whether it needs to be queued.
+ pub(crate) fn set_cleared(self: &Ref<Self>, abort: bool) -> bool {
+ let (needs_removal, needs_queueing) = {
+ // Update state and determine if we need to queue a work item. We only need to do it
+ // when the node is not dead or if the user already completed the death notification.
+ let mut inner = self.inner.lock();
+ inner.cleared = true;
+ if abort {
+ inner.aborted = true;
+ }
+ (!inner.dead, !inner.dead || inner.notification_done)
+ };
+
+ // Remove death notification from node.
+ if needs_removal {
+ let mut owner_inner = self.node.owner.inner.lock();
+ let node_inner = self.node.inner.access_mut(&mut owner_inner);
+ unsafe { node_inner.death_list.remove(self) };
+ }
+
+ needs_queueing
+ }
+
+ /// Sets the 'notification done' flag to `true`.
+ ///
+ /// Queues the work item on the given thread if the death notification has already been cleared.
+ pub(crate) fn set_notification_done(self: Ref<Self>, thread: &Thread) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ inner.notification_done = true;
+ inner.cleared
+ };
+
+ if needs_queueing {
+ let _ = thread.push_work_if_looper(self);
+ }
+ }
+
+ /// Sets the 'dead' flag to `true` and queues work item if needed.
+ pub(crate) fn set_dead(self: Ref<Self>) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ if inner.cleared {
+ false
+ } else {
+ inner.dead = true;
+ true
+ }
+ };
+
+ if needs_queueing {
+ // Push the death notification to the target process. There is nothing else to do if
+ // it's already dead.
+ let process = self.process.clone();
+ let _ = process.push_work(self);
+ }
+ }
+}
+
+impl GetLinks for NodeDeath {
+ type EntryType = NodeDeath;
+ fn get_links(data: &NodeDeath) -> &Links<NodeDeath> {
+ &data.death_links
+ }
+}
+
+impl DeliverToRead for NodeDeath {
+ fn do_work(self: Ref<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ let done = {
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ inner.cleared && (!inner.dead || inner.notification_done)
+ };
+
+ let cookie = self.cookie;
+ let cmd = if done {
+ BR_CLEAR_DEATH_NOTIFICATION_DONE
+ } else {
+ let process = self.process.clone();
+ let mut process_inner = process.inner.lock();
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ // We're still holding the inner lock, so it cannot be aborted while we insert it into
+ // the delivered list.
+ process_inner.death_delivered(self.clone());
+ BR_DEAD_BINDER
+ };
+
+ writer.write(&cmd)?;
+ writer.write(&cookie)?;
+
+ // Mimic the original code: we stop processing work items when we get to a death
+ // notification.
+ Ok(cmd != BR_DEAD_BINDER)
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.work_links
+ }
+}
+
+pub(crate) struct Node {
+ pub(crate) global_id: u64,
+ ptr: usize,
+ cookie: usize,
+ pub(crate) flags: u32,
+ pub(crate) owner: Ref<Process>,
+ inner: LockedBy<NodeInner, Mutex<ProcessInner>>,
+ links: Links<dyn DeliverToRead>,
+}
+
+impl Node {
+ pub(crate) fn new(ptr: usize, cookie: usize, flags: u32, owner: Ref<Process>) -> Self {
+ static NEXT_ID: AtomicU64 = AtomicU64::new(1);
+ let inner = LockedBy::new(
+ &owner.inner,
+ NodeInner {
+ strong: CountState::new(),
+ weak: CountState::new(),
+ death_list: List::new(),
+ },
+ );
+ Self {
+ global_id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
+ ptr,
+ cookie,
+ flags,
+ owner,
+ inner,
+ links: Links::new(),
+ }
+ }
+
+ pub(crate) fn get_id(&self) -> (usize, usize) {
+ (self.ptr, self.cookie)
+ }
+
+ pub(crate) fn next_death(
+ &self,
+ guard: &mut Guard<'_, Mutex<ProcessInner>>,
+ ) -> Option<Ref<NodeDeath>> {
+ self.inner.access_mut(guard).death_list.pop_front()
+ }
+
+ pub(crate) fn add_death(
+ &self,
+ death: Ref<NodeDeath>,
+ guard: &mut Guard<'_, Mutex<ProcessInner>>,
+ ) {
+ self.inner.access_mut(guard).death_list.push_back(death);
+ }
+
+ pub(crate) fn update_refcount_locked(
+ &self,
+ inc: bool,
+ strong: bool,
+ biased: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> bool {
+ let inner = self.inner.access_from_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update biased state: if the count is not biased, there is nothing to do; otherwise,
+ // we're removing the bias, so mark the state as such.
+ if biased {
+ if !state.is_biased {
+ return false;
+ }
+
+ state.is_biased = false;
+ }
+
+ // Update the count and determine whether we need to push work.
+ // TODO: Here we may want to check the weak count being zero but the strong count being 1,
+ // because in such cases, we won't deliver anything to userspace, so we shouldn't queue
+ // either.
+ if inc {
+ state.count += 1;
+ !state.has_count
+ } else {
+ state.count -= 1;
+ state.count == 0 && state.has_count
+ }
+ }
+
+ pub(crate) fn update_refcount(self: &Ref<Self>, inc: bool, strong: bool) {
+ self.owner
+ .inner
+ .lock()
+ .update_node_refcount(self, inc, strong, false, None);
+ }
+
+ pub(crate) fn populate_counts(
+ &self,
+ out: &mut BinderNodeInfoForRef,
+ guard: &Guard<'_, Mutex<ProcessInner>>,
+ ) {
+ let inner = self.inner.access(guard);
+ out.strong_count = inner.strong.count as _;
+ out.weak_count = inner.weak.count as _;
+ }
+
+ pub(crate) fn populate_debug_info(
+ &self,
+ out: &mut BinderNodeDebugInfo,
+ guard: &Guard<'_, Mutex<ProcessInner>>,
+ ) {
+ out.ptr = self.ptr as _;
+ out.cookie = self.cookie as _;
+ let inner = self.inner.access(guard);
+ if inner.strong.has_count {
+ out.has_strong_ref = 1;
+ }
+ if inner.weak.has_count {
+ out.has_weak_ref = 1;
+ }
+ }
+
+ pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, Mutex<ProcessInner>>) {
+ let inner = self.inner.access_mut(guard);
+ inner.strong.has_count = true;
+ inner.weak.has_count = true;
+ }
+
+ fn write(&self, writer: &mut UserSlicePtrWriter, code: u32) -> Result {
+ writer.write(&code)?;
+ writer.write(&self.ptr)?;
+ writer.write(&self.cookie)?;
+ Ok(())
+ }
+}
+
+impl DeliverToRead for Node {
+ fn do_work(self: Ref<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ let mut owner_inner = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut owner_inner);
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+ inner.weak.has_count = weak;
+ inner.strong.has_count = strong;
+
+ if !weak {
+ // Remove the node if there are no references to it.
+ owner_inner.remove_node(self.ptr);
+ } else {
+ if !has_weak {
+ inner.weak.add_bias();
+ }
+
+ if !has_strong && strong {
+ inner.strong.add_bias();
+ }
+ }
+
+ drop(owner_inner);
+
+ // This could be done more compactly, but we write out all the possibilities for
+ // compatibility with the original implementation with respect to the order of events.
+ if weak && !has_weak {
+ self.write(writer, BR_INCREFS)?;
+ }
+
+ if strong && !has_strong {
+ self.write(writer, BR_ACQUIRE)?;
+ }
+
+ if !strong && has_strong {
+ self.write(writer, BR_RELEASE)?;
+ }
+
+ if !weak && has_weak {
+ self.write(writer, BR_DECREFS)?;
+ }
+
+ Ok(true)
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.links
+ }
+}
+
+pub(crate) struct NodeRef {
+ pub(crate) node: Ref<Node>,
+ strong_count: usize,
+ weak_count: usize,
+}
+
+impl NodeRef {
+ pub(crate) fn new(node: Ref<Node>, strong_count: usize, weak_count: usize) -> Self {
+ Self {
+ node,
+ strong_count,
+ weak_count,
+ }
+ }
+
+ pub(crate) fn absorb(&mut self, mut other: Self) {
+ self.strong_count += other.strong_count;
+ self.weak_count += other.weak_count;
+ other.strong_count = 0;
+ other.weak_count = 0;
+ }
+
+ pub(crate) fn clone(&self, strong: bool) -> BinderResult<NodeRef> {
+ if strong && self.strong_count == 0 {
+ return Err(BinderError::new_failed());
+ }
+
+ Ok(self
+ .node
+ .owner
+ .inner
+ .lock()
+ .new_node_ref(self.node.clone(), strong, None))
+ }
+
+ /// Updates (increments or decrements) the number of references held against the node. If the
+ /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
+ /// its `update_refcount` function called.
+ ///
+ /// Returns whether `self` should be removed (when both counts are zero).
+ pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
+ if strong && self.strong_count == 0 {
+ return false;
+ }
+
+ let (count, other_count) = if strong {
+ (&mut self.strong_count, self.weak_count)
+ } else {
+ (&mut self.weak_count, self.strong_count)
+ };
+
+ if inc {
+ if *count == 0 {
+ self.node.update_refcount(true, strong);
+ }
+ *count += 1;
+ } else {
+ *count -= 1;
+ if *count == 0 {
+ self.node.update_refcount(false, strong);
+ return other_count == 0;
+ }
+ }
+
+ false
+ }
+}
+
+impl Drop for NodeRef {
+ fn drop(&mut self) {
+ if self.strong_count > 0 {
+ self.node.update_refcount(false, true);
+ }
+
+ if self.weak_count > 0 {
+ self.node.update_refcount(false, false);
+ }
+ }
+}
diff --git a/drivers/android/process.rs b/drivers/android/process.rs
new file mode 100644
index 000000000000..229493f0d7a1
--- /dev/null
+++ b/drivers/android/process.rs
@@ -0,0 +1,961 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::{convert::TryFrom, mem::take, ops::Range};
+use kernel::{
+ bindings,
+ cred::Credential,
+ file::{self, File, IoctlCommand, IoctlHandler, PollTable},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ linked_list::List,
+ mm,
+ pages::Pages,
+ prelude::*,
+ rbtree::RBTree,
+ sync::{Guard, Mutex, Ref, RefBorrow, UniqueRef},
+ task::Task,
+ user_ptr::{UserSlicePtr, UserSlicePtrReader},
+ Either,
+};
+
+use crate::{
+ allocation::Allocation,
+ context::Context,
+ defs::*,
+ node::{Node, NodeDeath, NodeRef},
+ range_alloc::RangeAllocator,
+ thread::{BinderError, BinderResult, Thread},
+ DeliverToRead, DeliverToReadListAdapter,
+};
+
+// TODO: Review this:
+// Lock order: Process::node_refs -> Process::inner -> Thread::inner
+
+pub(crate) struct AllocationInfo {
+ /// Range within the allocation where we can find the offsets to the object descriptors.
+ pub(crate) offsets: Range<usize>,
+}
+
+struct Mapping {
+ address: usize,
+ alloc: RangeAllocator<AllocationInfo>,
+ pages: Ref<[Pages<0>]>,
+}
+
+impl Mapping {
+ fn new(address: usize, size: usize, pages: Ref<[Pages<0>]>) -> Result<Self> {
+ let alloc = RangeAllocator::new(size)?;
+ Ok(Self {
+ address,
+ alloc,
+ pages,
+ })
+ }
+}
+
+// TODO: Make this private.
+pub(crate) struct ProcessInner {
+ is_manager: bool,
+ is_dead: bool,
+ threads: RBTree<i32, Ref<Thread>>,
+ ready_threads: List<Ref<Thread>>,
+ work: List<DeliverToReadListAdapter>,
+ mapping: Option<Mapping>,
+ nodes: RBTree<usize, Ref<Node>>,
+
+ delivered_deaths: List<Ref<NodeDeath>>,
+
+ /// The number of requested threads that haven't registered yet.
+ requested_thread_count: u32,
+
+ /// The maximum number of threads used by the process thread pool.
+ max_threads: u32,
+
+ /// The number of threads that have started and registered with the thread pool.
+ started_thread_count: u32,
+}
+
+impl ProcessInner {
+ fn new() -> Self {
+ Self {
+ is_manager: false,
+ is_dead: false,
+ threads: RBTree::new(),
+ ready_threads: List::new(),
+ work: List::new(),
+ mapping: None,
+ nodes: RBTree::new(),
+ requested_thread_count: 0,
+ max_threads: 0,
+ started_thread_count: 0,
+ delivered_deaths: List::new(),
+ }
+ }
+
+ fn push_work(&mut self, work: Ref<dyn DeliverToRead>) -> BinderResult {
+ // Try to find a ready thread to which to push the work.
+ if let Some(thread) = self.ready_threads.pop_front() {
+ // Push to thread while holding state lock. This prevents the thread from giving up
+ // (for example, because of a signal) when we're about to deliver work.
+ thread.push_work(work)
+ } else if self.is_dead {
+ Err(BinderError::new_dead())
+ } else {
+ // There are no ready threads. Push work to process queue.
+ self.work.push_back(work);
+
+ // Wake up polling threads, if any.
+ for thread in self.threads.values() {
+ thread.notify_if_poll_ready();
+ }
+ Ok(())
+ }
+ }
+
+ // TODO: Should this be private?
+ pub(crate) fn remove_node(&mut self, ptr: usize) {
+ self.nodes.remove(&ptr);
+ }
+
+ /// Updates the reference count on the given node.
+ // TODO: Decide if this should be private.
+ pub(crate) fn update_node_refcount(
+ &mut self,
+ node: &Ref<Node>,
+ inc: bool,
+ strong: bool,
+ biased: bool,
+ othread: Option<&Thread>,
+ ) {
+ let push = node.update_refcount_locked(inc, strong, biased, self);
+
+ // If we decided that we need to push work, push either to the process or to a thread if
+ // one is specified.
+ if push {
+ if let Some(thread) = othread {
+ thread.push_work_deferred(node.clone());
+ } else {
+ let _ = self.push_work(node.clone());
+ // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
+ // that case, it doesn't care about the notification.
+ }
+ }
+ }
+
+ // TODO: Make this private.
+ pub(crate) fn new_node_ref(
+ &mut self,
+ node: Ref<Node>,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> NodeRef {
+ self.update_node_refcount(&node, true, strong, false, thread);
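+ // The returned reference owns exactly one count of the requested strength.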
+ let strong_count = if strong { 1 } else { 0 };
+ NodeRef::new(node, strong_count, 1 - strong_count)
+ }
+
+ /// Returns an existing node with the given pointer and cookie, if one exists.
+ ///
+ /// Returns an error if a node with the given pointer but a different cookie exists.
+ fn get_existing_node(&self, ptr: usize, cookie: usize) -> Result<Option<Ref<Node>>> {
+ match self.nodes.get(&ptr) {
+ None => Ok(None),
+ Some(node) => {
+ let (_, node_cookie) = node.get_id();
+ if node_cookie == cookie {
+ Ok(Some(node.clone()))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ }
+
+ /// Returns a reference to an existing node with the given pointer and cookie. It requires a
+ /// mutable reference because it needs to increment the ref count on the node, which may
+ /// require pushing work to the work queue (to notify userspace of 0 to 1 transitions).
+ fn get_existing_node_ref(
+ &mut self,
+ ptr: usize,
+ cookie: usize,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> Result<Option<NodeRef>> {
+ Ok(self
+ .get_existing_node(ptr, cookie)?
+ .map(|node| self.new_node_ref(node, strong, thread)))
+ }
+
+ fn register_thread(&mut self) -> bool {
+ if self.requested_thread_count == 0 {
+ return false;
+ }
+
+ self.requested_thread_count -= 1;
+ self.started_thread_count += 1;
+ true
+ }
+
+ /// Finds a delivered death notification with the given cookie, removes it from the process's
+ /// delivered list, and returns it.
+ fn pull_delivered_death(&mut self, cookie: usize) -> Option<Ref<NodeDeath>> {
+ let mut cursor = self.delivered_deaths.cursor_front_mut();
+ while let Some(death) = cursor.current() {
+ if death.cookie == cookie {
+ return cursor.remove_current();
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn death_delivered(&mut self, death: Ref<NodeDeath>) {
+ self.delivered_deaths.push_back(death);
+ }
+}
+
+struct NodeRefInfo {
+ node_ref: NodeRef,
+ death: Option<Ref<NodeDeath>>,
+}
+
+impl NodeRefInfo {
+ fn new(node_ref: NodeRef) -> Self {
+ Self {
+ node_ref,
+ death: None,
+ }
+ }
+}
+
+struct ProcessNodeRefs {
+ by_handle: RBTree<u32, NodeRefInfo>,
+ by_global_id: RBTree<u64, u32>,
+}
+
+impl ProcessNodeRefs {
+ fn new() -> Self {
+ Self {
+ by_handle: RBTree::new(),
+ by_global_id: RBTree::new(),
+ }
+ }
+}
+
+pub(crate) struct Process {
+ ctx: Ref<Context>,
+
+ // The task leader (process).
+ pub(crate) task: ARef<Task>,
+
+ // Credential associated with file when `Process` is created.
+ pub(crate) cred: ARef<Credential>,
+
+ // TODO: For now this is a mutex because we have allocations in RangeAllocator while holding the
+ // lock. We may want to split up the process state at some point to use a spin lock for the
+ // other fields.
+ // TODO: Make this private again.
+ pub(crate) inner: Mutex<ProcessInner>,
+
+ // References are in a different mutex to avoid recursive acquisition when
+ // incrementing/decrementing a node in another process.
+ node_refs: Mutex<ProcessNodeRefs>,
+}
+
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for Process {}
+unsafe impl Sync for Process {}
+
+impl Process {
+ fn new(ctx: Ref<Context>, cred: ARef<Credential>) -> Result<Ref<Self>> {
+ let mut process = Pin::from(UniqueRef::try_new(Self {
+ ctx,
+ cred,
+ task: Task::current().group_leader().into(),
+ // SAFETY: `inner` is initialised in the call to `mutex_init` below.
+ inner: unsafe { Mutex::new(ProcessInner::new()) },
+ // SAFETY: `node_refs` is initialised in the call to `mutex_init` below.
+ node_refs: unsafe { Mutex::new(ProcessNodeRefs::new()) },
+ })?);
+
+ // SAFETY: `inner` is pinned when `Process` is.
+ let pinned = unsafe { process.as_mut().map_unchecked_mut(|p| &mut p.inner) };
+ kernel::mutex_init!(pinned, "Process::inner");
+
+ // SAFETY: `node_refs` is pinned when `Process` is.
+ let pinned = unsafe { process.as_mut().map_unchecked_mut(|p| &mut p.node_refs) };
+ kernel::mutex_init!(pinned, "Process::node_refs");
+
+ Ok(process.into())
+ }
+
+ /// Attempts to fetch a work item from the process queue.
+ pub(crate) fn get_work(&self) -> Option<Ref<dyn DeliverToRead>> {
+ self.inner.lock().work.pop_front()
+ }
+
+ /// Attempts to fetch a work item from the process queue. If none is available, it registers the
+ /// given thread as ready to receive work directly.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain; when
+ /// it is, work will always be delivered directly to the thread (and not through the process
+ /// queue).
+ pub(crate) fn get_work_or_register<'a>(
+ &'a self,
+ thread: &'a Ref<Thread>,
+ ) -> Either<Ref<dyn DeliverToRead>, Registration<'a>> {
+ let mut inner = self.inner.lock();
+
+ // Try to get work from the process queue.
+ if let Some(work) = inner.work.pop_front() {
+ return Either::Left(work);
+ }
+
+ // Register the thread as ready.
+ Either::Right(Registration::new(self, thread, &mut inner))
+ }
+
+ fn get_thread(self: RefBorrow<'_, Self>, id: i32) -> Result<Ref<Thread>> {
+ // TODO: Consider using read/write locks here instead.
+ {
+ let inner = self.inner.lock();
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+ }
+
+ // Allocate a new `Thread` without holding any locks.
+ let ta = Thread::new(id, self.into())?;
+ let node = RBTree::try_allocate_node(id, ta.clone())?;
+
+ let mut inner = self.inner.lock();
+
+ // Recheck. It's possible the thread was created while we were not holding the lock.
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+
+ inner.threads.insert(node);
+ Ok(ta)
+ }
+
+ pub(crate) fn push_work(&self, work: Ref<dyn DeliverToRead>) -> BinderResult {
+ self.inner.lock().push_work(work)
+ }
+
+ fn set_as_manager(
+ self: RefBorrow<'_, Self>,
+ info: Option<FlatBinderObject>,
+ thread: &Thread,
+ ) -> Result {
+ let (ptr, cookie, flags) = if let Some(obj) = info {
+ (
+ // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
+ // is safe to access the `binder` field.
+ unsafe { obj.__bindgen_anon_1.binder },
+ obj.cookie,
+ obj.flags,
+ )
+ } else {
+ (0, 0, 0)
+ };
+ let node_ref = self.get_node(ptr as _, cookie as _, flags as _, true, Some(thread))?;
+ let node = node_ref.node.clone();
+ self.ctx.set_manager_node(node_ref)?;
+ self.inner.lock().is_manager = true;
+
+ // Force the state of the node to prevent the delivery of acquire/increfs.
+ let mut owner_inner = node.owner.inner.lock();
+ node.force_has_count(&mut owner_inner);
+ Ok(())
+ }
+
+ pub(crate) fn get_node(
+ self: RefBorrow<'_, Self>,
+ ptr: usize,
+ cookie: usize,
+ flags: u32,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> Result<NodeRef> {
+ // Try to find an existing node.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node_ref(ptr, cookie, strong, thread)? {
+ return Ok(node);
+ }
+ }
+
+ // Allocate the node before reacquiring the lock.
+ let node = Ref::try_new(Node::new(ptr, cookie, flags, self.into()))?;
+ let rbnode = RBTree::try_allocate_node(ptr, node.clone())?;
+
+ let mut inner = self.inner.lock();
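+ // Recheck: another thread may have inserted the node while the lock was released.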
+ if let Some(node) = inner.get_existing_node_ref(ptr, cookie, strong, thread)? {
+ return Ok(node);
+ }
+
+ inner.nodes.insert(rbnode);
+ Ok(inner.new_node_ref(node, strong, thread))
+ }
+
+ pub(crate) fn insert_or_update_handle(
+ &self,
+ node_ref: NodeRef,
+ is_manager: bool,
+ ) -> Result<u32> {
+ {
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup before inserting.
+ if let Some(handle_ref) = refs.by_global_id.get(&node_ref.node.global_id) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref.absorb(node_ref);
+ return Ok(handle);
+ }
+ }
+
+ // Reserve memory for tree nodes.
+ let reserve1 = RBTree::try_reserve_node()?;
+ let reserve2 = RBTree::try_reserve_node()?;
+
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup again as node may have been inserted before the lock was reacquired.
+ if let Some(handle_ref) = refs.by_global_id.get(&node_ref.node.global_id) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref.absorb(node_ref);
+ return Ok(handle);
+ }
+
+ // Find the lowest unused handle; handle 0 is reserved for the context manager.
+ let mut target = if is_manager { 0 } else { 1 };
+ for handle in refs.by_handle.keys() {
+ if *handle > target {
+ break;
+ }
+ if *handle == target {
+ target = target.checked_add(1).ok_or(ENOMEM)?;
+ }
+ }
+
+ // Ensure the process is still alive while we insert a new reference.
+ let inner = self.inner.lock();
+ if inner.is_dead {
+ return Err(ESRCH);
+ }
+ refs.by_global_id
+ .insert(reserve1.into_node(node_ref.node.global_id, target));
+ refs.by_handle
+ .insert(reserve2.into_node(target, NodeRefInfo::new(node_ref)));
+ Ok(target)
+ }
+
+ pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
+ // When handle is zero, try to get the context manager.
+ if handle == 0 {
+ self.ctx.get_manager_node(true)
+ } else {
+ self.get_node_from_handle(handle, true)
+ }
+ }
+
+ pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> BinderResult<NodeRef> {
+ self.node_refs
+ .lock()
+ .by_handle
+ .get(&handle)
+ .ok_or(ENOENT)?
+ .node_ref
+ .clone(strong)
+ }
+
+ pub(crate) fn remove_from_delivered_deaths(&self, death: &Ref<NodeDeath>) {
+ let mut inner = self.inner.lock();
+ let removed = unsafe { inner.delivered_deaths.remove(death) };
+ drop(inner);
+ drop(removed);
+ }
+
+ pub(crate) fn update_ref(&self, handle: u32, inc: bool, strong: bool) -> Result {
+ if inc && handle == 0 {
+ if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
+ if core::ptr::eq(self, &*node_ref.node.owner) {
+ return Err(EINVAL);
+ }
+ let _ = self.insert_or_update_handle(node_ref, true);
+ return Ok(());
+ }
+ }
+
+ // To preserve original binder behaviour, we only fail requests where the manager tries to
+ // increment references on itself.
+ let mut refs = self.node_refs.lock();
+ if let Some(info) = refs.by_handle.get_mut(&handle) {
+ if info.node_ref.update(inc, strong) {
+ // Clean up death if there is one attached to this node reference.
+ if let Some(death) = info.death.take() {
+ death.set_cleared(true);
+ self.remove_from_delivered_deaths(&death);
+ }
+
+ // Remove reference from process tables.
+ let id = info.node_ref.node.global_id;
+ refs.by_handle.remove(&handle);
+ refs.by_global_id.remove(&id);
+ }
+ }
+ Ok(())
+ }
+
+ /// Decrements the refcount of the given node, if one exists.
+ pub(crate) fn update_node(&self, ptr: usize, cookie: usize, strong: bool, biased: bool) {
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ inner.update_node_refcount(&node, false, strong, biased, None);
+ }
+ }
+
+ pub(crate) fn inc_ref_done(&self, reader: &mut UserSlicePtrReader, strong: bool) -> Result {
+ let ptr = reader.read::<usize>()?;
+ let cookie = reader.read::<usize>()?;
+ self.update_node(ptr, cookie, strong, true);
+ Ok(())
+ }
+
+ pub(crate) fn buffer_alloc(&self, size: usize) -> BinderResult<Allocation<'_>> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
+
+ let offset = mapping.alloc.reserve_new(size)?;
+ Ok(Allocation::new(
+ self,
+ offset,
+ size,
+ mapping.address + offset,
+ mapping.pages.clone(),
+ ))
+ }
+
+ // TODO: Review if we want an Option or a Result.
+ pub(crate) fn buffer_get(&self, ptr: usize) -> Option<Allocation<'_>> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut()?;
+ let offset = ptr.checked_sub(mapping.address)?;
+ let (size, odata) = mapping.alloc.reserve_existing(offset).ok()?;
+ let mut alloc = Allocation::new(self, offset, size, ptr, mapping.pages.clone());
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ Some(alloc)
+ }
+
+ pub(crate) fn buffer_raw_free(&self, ptr: usize) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if ptr < mapping.address
+ || mapping
+ .alloc
+ .reservation_abort(ptr - mapping.address)
+ .is_err()
+ {
+ pr_warn!(
+ "Pointer {:x} failed to free, base = {:x}\n",
+ ptr,
+ mapping.address
+ );
+ }
+ }
+ }
+
+ pub(crate) fn buffer_make_freeable(&self, offset: usize, data: Option<AllocationInfo>) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if mapping.alloc.reservation_commit(offset, data).is_err() {
+ pr_warn!("Offset {} failed to be marked freeable\n", offset);
+ }
+ }
+ }
+
+ fn create_mapping(&self, vma: &mut mm::virt::Area) -> Result {
+ let size = core::cmp::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let page_count = size / kernel::PAGE_SIZE;
+
+ // Allocate and map all pages.
+ //
+ // N.B. If we fail halfway through mapping these pages, the kernel will unmap them.
+ let mut pages = Vec::new();
+ pages.try_reserve_exact(page_count)?;
+ let mut address = vma.start();
+ for _ in 0..page_count {
+ let page = Pages::<0>::new()?;
+ vma.insert_page(address, &page)?;
+ pages.try_push(page)?;
+ address += kernel::PAGE_SIZE;
+ }
+
+ let ref_pages = Ref::try_from(pages)?;
+
+ // Save pages for later.
+ let mut inner = self.inner.lock();
+ match &inner.mapping {
+ None => inner.mapping = Some(Mapping::new(vma.start(), size, ref_pages)?),
+ Some(_) => return Err(EBUSY),
+ }
+ Ok(())
+ }
+
+ fn version(&self, data: UserSlicePtr) -> Result {
+ data.writer().write(&BinderVersion::current())
+ }
+
+ pub(crate) fn register_thread(&self) -> bool {
+ self.inner.lock().register_thread()
+ }
+
+ fn remove_thread(&self, thread: Ref<Thread>) {
+ self.inner.lock().threads.remove(&thread.id);
+ thread.release();
+ }
+
+ fn set_max_threads(&self, max: u32) {
+ self.inner.lock().max_threads = max;
+ }
+
+ fn get_node_debug_info(&self, data: UserSlicePtr) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ // Read the starting point.
+ let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr as usize;
+ let mut out = BinderNodeDebugInfo::default();
+
+ {
+ let inner = self.inner.lock();
+ for (node_ptr, node) in &inner.nodes {
+ if *node_ptr > ptr {
+ node.populate_debug_info(&mut out, &inner);
+ break;
+ }
+ }
+ }
+
+ writer.write(&out)
+ }
+
+ fn get_node_info_from_ref(&self, data: UserSlicePtr) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut out = reader.read::<BinderNodeInfoForRef>()?;
+
+ if out.strong_count != 0
+ || out.weak_count != 0
+ || out.reserved1 != 0
+ || out.reserved2 != 0
+ || out.reserved3 != 0
+ {
+ return Err(EINVAL);
+ }
+
+ // Only the context manager is allowed to use this ioctl.
+ if !self.inner.lock().is_manager {
+ return Err(EPERM);
+ }
+
+ let node_ref = self
+ .get_node_from_handle(out.handle, true)
+ .or(Err(EINVAL))?;
+
+ // Get the counts from the node.
+ {
+ let owner_inner = node_ref.node.owner.inner.lock();
+ node_ref.node.populate_counts(&mut out, &owner_inner);
+ }
+
+ // Write the result back.
+ writer.write(&out)
+ }
+
+ pub(crate) fn needs_thread(&self) -> bool {
+ let mut inner = self.inner.lock();
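+ // Request a new thread only when no request is outstanding, no thread is ready, and the
+ // pool is still below its configured maximum.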
+ let ret = inner.requested_thread_count == 0
+ && inner.ready_threads.is_empty()
+ && inner.started_thread_count < inner.max_threads;
+ if ret {
+ inner.requested_thread_count += 1
+ };
+ ret
+ }
+
+ pub(crate) fn request_death(
+ self: &Ref<Self>,
+ reader: &mut UserSlicePtrReader,
+ thread: &Thread,
+ ) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: usize = reader.read()?;
+
+ // TODO: First two should result in error, but not the others.
+
+ // TODO: Do we care about the context manager dying?
+
+ // Queue BR_ERROR if we can't allocate memory for the death notification.
+ let death = UniqueRef::try_new_uninit().map_err(|err| {
+ thread.push_return_work(BR_ERROR);
+ err
+ })?;
+
+ let mut refs = self.node_refs.lock();
+ let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
+
+ // Nothing to do if there is already a death notification request for this handle.
+ if info.death.is_some() {
+ return Ok(());
+ }
+
+ let death = {
+ let mut pinned = Pin::from(death.write(
+ // SAFETY: `init` is called below.
+ unsafe { NodeDeath::new(info.node_ref.node.clone(), self.clone(), cookie) },
+ ));
+ pinned.as_mut().init();
+ Ref::<NodeDeath>::from(pinned)
+ };
+
+ info.death = Some(death.clone());
+
+ // Register the death notification.
+ {
+ let mut owner_inner = info.node_ref.node.owner.inner.lock();
+ if owner_inner.is_dead {
+ drop(owner_inner);
+ let _ = self.push_work(death);
+ } else {
+ info.node_ref.node.add_death(death, &mut owner_inner);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_death(&self, reader: &mut UserSlicePtrReader, thread: &Thread) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: usize = reader.read()?;
+
+ let mut refs = self.node_refs.lock();
+ let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
+
+ let death = info.death.take().ok_or(EINVAL)?;
+ if death.cookie != cookie {
+ info.death = Some(death);
+ return Err(EINVAL);
+ }
+
+ // Update state and determine if we need to queue a work item. We only need to do it when
+ // the node is not dead or if the user already completed the death notification.
+ if death.set_cleared(false) {
+ let _ = thread.push_work_if_looper(death);
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: usize, thread: &Thread) {
+ if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ death.set_notification_done(thread);
+ }
+ }
+}
+
+impl IoctlHandler for Process {
+ type Target<'a> = RefBorrow<'a, Process>;
+
+ fn write(
+ this: RefBorrow<'_, Process>,
+ _file: &File,
+ cmd: u32,
+ reader: &mut UserSlicePtrReader,
+ ) -> Result<i32> {
+ let thread = this.get_thread(Task::current().pid())?;
+ match cmd {
+ bindings::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
+ bindings::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
+ bindings::BINDER_THREAD_EXIT => this.remove_thread(thread),
+ bindings::BINDER_SET_CONTEXT_MGR_EXT => {
+ this.set_as_manager(Some(reader.read()?), &thread)?
+ }
+ _ => return Err(EINVAL),
+ }
+ Ok(0)
+ }
+
+ fn read_write(
+ this: RefBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ data: UserSlicePtr,
+ ) -> Result<i32> {
+ let thread = this.get_thread(Task::current().pid())?;
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+ match cmd {
+ bindings::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
+ bindings::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
+ bindings::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
+ bindings::BINDER_VERSION => this.version(data)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(0)
+ }
+}
+
+#[vtable]
+impl file::Operations for Process {
+ type Data = Ref<Self>;
+ type OpenData = Ref<Context>;
+
+ fn open(ctx: &Ref<Context>, file: &File) -> Result<Self::Data> {
+ Self::new(ctx.clone(), file.cred().into())
+ }
+
+ fn release(obj: Self::Data, _file: &File) {
+ // Mark this process as dead. We'll do the same for the threads later.
+ obj.inner.lock().is_dead = true;
+
+ // If this process is the manager, unset it.
+ if obj.inner.lock().is_manager {
+ obj.ctx.unset_manager_node();
+ }
+
+ // TODO: Do this in a worker?
+
+ // Cancel all pending work items.
+ while let Some(work) = obj.get_work() {
+ work.cancel();
+ }
+
+ // Free any resources kept alive by allocated buffers.
+ let omapping = obj.inner.lock().mapping.take();
+ if let Some(mut mapping) = omapping {
+ let address = mapping.address;
+ let pages = mapping.pages.clone();
+ mapping.alloc.for_each(|offset, size, odata| {
+ let ptr = offset + address;
+ let mut alloc = Allocation::new(&obj, offset, size, ptr, pages.clone());
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ drop(alloc)
+ });
+ }
+
+ // Drop all references. We do this dance with `take` to avoid destroying the references
+ // while holding the lock.
+ let mut refs = obj.node_refs.lock();
+ let mut node_refs = take(&mut refs.by_handle);
+ drop(refs);
+
+ // Remove all death notifications from the nodes (that belong to a different process).
+ for info in node_refs.values_mut() {
+ let death = if let Some(existing) = info.death.take() {
+ existing
+ } else {
+ continue;
+ };
+
+ death.set_cleared(false);
+ }
+
+ // Do a similar dance for the state lock.
+ let mut inner = obj.inner.lock();
+ let threads = take(&mut inner.threads);
+ let nodes = take(&mut inner.nodes);
+ drop(inner);
+
+ // Release all threads.
+ for thread in threads.values() {
+ thread.release();
+ }
+
+ // Deliver death notifications.
+ for node in nodes.values() {
+ loop {
+ let death = {
+ let mut inner = obj.inner.lock();
+ if let Some(death) = node.next_death(&mut inner) {
+ death
+ } else {
+ break;
+ }
+ };
+
+ death.set_dead();
+ }
+ }
+ }
+
+ fn ioctl(this: RefBorrow<'_, Process>, file: &File, cmd: &mut IoctlCommand) -> Result<i32> {
+ cmd.dispatch::<Self>(this, file)
+ }
+
+ fn compat_ioctl(
+ this: RefBorrow<'_, Process>,
+ file: &File,
+ cmd: &mut IoctlCommand,
+ ) -> Result<i32> {
+ cmd.dispatch::<Self>(this, file)
+ }
+
+ fn mmap(this: RefBorrow<'_, Process>, _file: &File, vma: &mut mm::virt::Area) -> Result {
+ // We don't allow mmap to be used in a different process.
+ if !core::ptr::eq(Task::current().group_leader(), &*this.task) {
+ return Err(EINVAL);
+ }
+
+ if vma.start() == 0 {
+ return Err(EINVAL);
+ }
+
+ let mut flags = vma.flags();
+ use mm::virt::flags::*;
+ if flags & WRITE != 0 {
+ return Err(EPERM);
+ }
+
+ flags |= DONTCOPY | MIXEDMAP;
+ flags &= !MAYWRITE;
+ vma.set_flags(flags);
+
+ // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
+ this.create_mapping(vma)
+ }
+
+ fn poll(this: RefBorrow<'_, Process>, file: &File, table: &PollTable) -> Result<u32> {
+ let thread = this.get_thread(Task::current().pid())?;
+ let (from_proc, mut mask) = thread.poll(file, table);
+ if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
+ mask |= bindings::POLLIN;
+ }
+ Ok(mask)
+ }
+}
+
+pub(crate) struct Registration<'a> {
+ process: &'a Process,
+ thread: &'a Ref<Thread>,
+}
+
+impl<'a> Registration<'a> {
+ fn new(
+ process: &'a Process,
+ thread: &'a Ref<Thread>,
+ guard: &mut Guard<'_, Mutex<ProcessInner>>,
+ ) -> Self {
+ guard.ready_threads.push_back(thread.clone());
+ Self { process, thread }
+ }
+}
+
+impl Drop for Registration<'_> {
+ fn drop(&mut self) {
+ let mut inner = self.process.inner.lock();
+ unsafe { inner.ready_threads.remove(self.thread) };
+ }
+}
diff --git a/drivers/android/range_alloc.rs b/drivers/android/range_alloc.rs
new file mode 100644
index 000000000000..7b149048879b
--- /dev/null
+++ b/drivers/android/range_alloc.rs
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::ptr::NonNull;
+use kernel::{
+ linked_list::{CursorMut, GetLinks, Links, List},
+ prelude::*,
+};
+
+pub(crate) struct RangeAllocator<T> {
+ list: List<Box<Descriptor<T>>>,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+enum DescriptorState {
+ Free,
+ Reserved,
+ Allocated,
+}
+
+impl<T> RangeAllocator<T> {
+ pub(crate) fn new(size: usize) -> Result<Self> {
+ let desc = Box::try_new(Descriptor::new(0, size))?;
+ let mut list = List::new();
+ list.push_back(desc);
+ Ok(Self { list })
+ }
+
+ fn find_best_match(&self, size: usize) -> Option<NonNull<Descriptor<T>>> {
+ // TODO: Use a binary tree instead of list for this lookup.
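+ // Best fit: return an exact match immediately, otherwise remember the smallest free
+ // descriptor that is large enough.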
+ let mut best = None;
+ let mut best_size = usize::MAX;
+ let mut cursor = self.list.cursor_front();
+ while let Some(desc) = cursor.current() {
+ if desc.state == DescriptorState::Free {
+ if size == desc.size {
+ return Some(NonNull::from(desc));
+ }
+
+ if size < desc.size && desc.size < best_size {
+ best = Some(NonNull::from(desc));
+ best_size = desc.size;
+ }
+ }
+
+ cursor.move_next();
+ }
+ best
+ }
+
+ pub(crate) fn reserve_new(&mut self, size: usize) -> Result<usize> {
+ let desc_ptr = match self.find_best_match(size) {
+ None => return Err(ENOMEM),
+ Some(found) => found,
+ };
+
+ // SAFETY: We hold the only mutable reference to list, so it cannot have changed.
+ let desc = unsafe { &mut *desc_ptr.as_ptr() };
+ if desc.size == size {
+ desc.state = DescriptorState::Reserved;
+ return Ok(desc.offset);
+ }
+
+ // We need to break up the descriptor.
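+ // The first `size` bytes become the reservation; the remainder is re-inserted right after
+ // it as a new free descriptor.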
+ let new = Box::try_new(Descriptor::new(desc.offset + size, desc.size - size))?;
+ unsafe { self.list.insert_after(desc_ptr, new) };
+ desc.state = DescriptorState::Reserved;
+ desc.size = size;
+ Ok(desc.offset)
+ }
+
+ fn free_with_cursor(cursor: &mut CursorMut<'_, Box<Descriptor<T>>>) -> Result {
+ let mut size = match cursor.current() {
+ None => return Err(EINVAL),
+ Some(ref mut entry) => {
+ match entry.state {
+ DescriptorState::Free => return Err(EINVAL),
+ DescriptorState::Allocated => return Err(EPERM),
+ DescriptorState::Reserved => {}
+ }
+ entry.state = DescriptorState::Free;
+ entry.size
+ }
+ };
+
+ // Try to merge with the next entry.
+ if let Some(next) = cursor.peek_next() {
+ if next.state == DescriptorState::Free {
+ next.offset -= size;
+ next.size += size;
+ size = next.size;
+ cursor.remove_current();
+ }
+ }
+
+ // Try to merge with the previous entry.
+ if let Some(prev) = cursor.peek_prev() {
+ if prev.state == DescriptorState::Free {
+ prev.size += size;
+ cursor.remove_current();
+ }
+ }
+
+ Ok(())
+ }
+
+ fn find_at_offset(&mut self, offset: usize) -> Option<CursorMut<'_, Box<Descriptor<T>>>> {
+ let mut cursor = self.list.cursor_front_mut();
+ while let Some(desc) = cursor.current() {
+ if desc.offset == offset {
+ return Some(cursor);
+ }
+
+ if desc.offset > offset {
+ return None;
+ }
+
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result {
+ // TODO: The force case is currently O(n), but could be made O(1) with unsafe.
+ let mut cursor = self.find_at_offset(offset).ok_or(EINVAL)?;
+ Self::free_with_cursor(&mut cursor)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: Option<T>) -> Result {
+ // TODO: This is currently O(n), make it O(1).
+ let mut cursor = self.find_at_offset(offset).ok_or(ENOENT)?;
+ let desc = cursor.current().unwrap();
+ desc.state = DescriptorState::Allocated;
+ desc.data = data;
+ Ok(())
+ }
+
+ /// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
+ /// [`DescriptorState::Reserved`].
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, Option<T>)> {
+ // TODO: This is currently O(n), make it O(log n).
+ let mut cursor = self.find_at_offset(offset).ok_or(ENOENT)?;
+ let desc = cursor.current().unwrap();
+ if desc.state != DescriptorState::Allocated {
+ return Err(ENOENT);
+ }
+ desc.state = DescriptorState::Reserved;
+ Ok((desc.size, desc.data.take()))
+ }
+
+ pub(crate) fn for_each<F: Fn(usize, usize, Option<T>)>(&mut self, callback: F) {
+ let mut cursor = self.list.cursor_front_mut();
+ while let Some(desc) = cursor.current() {
+ if desc.state == DescriptorState::Allocated {
+ callback(desc.offset, desc.size, desc.data.take());
+ }
+
+ cursor.move_next();
+ }
+ }
+}
+
+struct Descriptor<T> {
+ state: DescriptorState,
+ size: usize,
+ offset: usize,
+ links: Links<Descriptor<T>>,
+ data: Option<T>,
+}
+
+impl<T> Descriptor<T> {
+ fn new(offset: usize, size: usize) -> Self {
+ Self {
+ size,
+ offset,
+ state: DescriptorState::Free,
+ links: Links::new(),
+ data: None,
+ }
+ }
+}
+
+impl<T> GetLinks for Descriptor<T> {
+ type EntryType = Self;
+ fn get_links(desc: &Self) -> &Links<Self> {
+ &desc.links
+ }
+}
diff --git a/drivers/android/rust_binder.rs b/drivers/android/rust_binder.rs
new file mode 100644
index 000000000000..6305a3045911
--- /dev/null
+++ b/drivers/android/rust_binder.rs
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Binder -- the Android IPC mechanism.
+//!
+//! TODO: This module is a work in progress.
+
+use kernel::{
+ io_buffer::IoBufferWriter,
+ linked_list::{GetLinks, GetLinksWrapped, Links},
+ miscdev::Registration,
+ prelude::*,
+ str::CStr,
+ sync::Ref,
+ user_ptr::UserSlicePtrWriter,
+};
+
+mod allocation;
+mod context;
+mod defs;
+mod node;
+mod process;
+mod range_alloc;
+mod thread;
+mod transaction;
+
+use {context::Context, thread::Thread};
+
+module! {
+ type: BinderModule,
+ name: b"rust_binder",
+ author: b"Wedson Almeida Filho",
+ description: b"Android Binder",
+ license: b"GPL",
+}
+
+trait DeliverToRead {
+ /// Performs work. Returns true if remaining work items in the queue should be processed
+ /// immediately, or false if it should return to the caller before processing additional work
+ /// items.
+ fn do_work(self: Ref<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool>;
+
+ /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
+ /// won't be delivered.
+ fn cancel(self: Ref<Self>) {}
+
+ /// Returns the linked list links for the work item.
+ fn get_links(&self) -> &Links<dyn DeliverToRead>;
+}
+
+struct DeliverToReadListAdapter {}
+
+impl GetLinks for DeliverToReadListAdapter {
+ type EntryType = dyn DeliverToRead;
+
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
+ data.get_links()
+ }
+}
+
+impl GetLinksWrapped for DeliverToReadListAdapter {
+ type Wrapped = Ref<dyn DeliverToRead>;
+}
+
+struct DeliverCode {
+ code: u32,
+ links: Links<dyn DeliverToRead>,
+}
+
+impl DeliverCode {
+ fn new(code: u32) -> Self {
+ Self {
+ code,
+ links: Links::new(),
+ }
+ }
+}
+
+impl DeliverToRead for DeliverCode {
+ fn do_work(self: Ref<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ writer.write(&self.code)?;
+ Ok(true)
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.links
+ }
+}
+
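+/// Rounds `value` up to the next multiple of the pointer size, e.g. `ptr_align(5) == 8` on a
+/// 64-bit target.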
+const fn ptr_align(value: usize) -> usize {
+ let size = core::mem::size_of::<usize>() - 1;
+ (value + size) & !size
+}
+
+unsafe impl Sync for BinderModule {}
+
+struct BinderModule {
+ _reg: Pin<Box<Registration<process::Process>>>,
+}
+
+impl kernel::Module for BinderModule {
+ fn init(name: &'static CStr, _module: &'static kernel::ThisModule) -> Result<Self> {
+ let ctx = Context::new()?;
+ let reg = Registration::new_pinned(fmt!("{name}"), ctx)?;
+ Ok(Self { _reg: reg })
+ }
+}
diff --git a/drivers/android/thread.rs b/drivers/android/thread.rs
new file mode 100644
index 000000000000..105a65cec1d4
--- /dev/null
+++ b/drivers/android/thread.rs
@@ -0,0 +1,871 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::{
+ alloc::AllocError,
+ mem::size_of,
+ sync::atomic::{AtomicU32, Ordering},
+};
+use kernel::{
+ bindings,
+ file::{File, PollTable},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ linked_list::{GetLinks, Links, List},
+ prelude::*,
+ security,
+ sync::{CondVar, Ref, SpinLock, UniqueRef},
+ user_ptr::{UserSlicePtr, UserSlicePtrWriter},
+ Either,
+};
+
+use crate::{
+ allocation::{Allocation, AllocationView},
+ defs::*,
+ process::{AllocationInfo, Process},
+ ptr_align,
+ transaction::{FileInfo, Transaction},
+ DeliverCode, DeliverToRead, DeliverToReadListAdapter,
+};
+
+pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;
+
+pub(crate) struct BinderError {
+ pub(crate) reply: u32,
+}
+
+impl BinderError {
+ pub(crate) fn new_failed() -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ }
+ }
+
+ pub(crate) fn new_dead() -> Self {
+ Self {
+ reply: BR_DEAD_REPLY,
+ }
+ }
+}
+
+impl From<Error> for BinderError {
+ fn from(_: Error) -> Self {
+ Self::new_failed()
+ }
+}
+
+impl From<AllocError> for BinderError {
+ fn from(_: AllocError) -> Self {
+ Self::new_failed()
+ }
+}
+
+const LOOPER_REGISTERED: u32 = 0x01;
+const LOOPER_ENTERED: u32 = 0x02;
+const LOOPER_EXITED: u32 = 0x04;
+const LOOPER_INVALID: u32 = 0x08;
+const LOOPER_WAITING: u32 = 0x10;
+const LOOPER_POLL: u32 = 0x20;
+
+struct InnerThread {
+ /// Determines the looper state of the thread. It is a bit-wise combination of the constants
+ /// prefixed with `LOOPER_`.
+ looper_flags: u32,
+
+ /// Determines if the thread is dead.
+ is_dead: bool,
+
+ /// Work item used to deliver error codes to the thread that started a transaction. When set to
+ /// `Some(x)`, it will hold the only reference to the object so that it can update the error
+ /// code to be delivered before queuing it.
+ reply_work: Option<Ref<ThreadError>>,
+
+ /// Work item used to deliver error codes to the current thread. When set to `Some(x)`, it will
+ /// hold the only reference to the object so that it can update the error code to be delivered
+ /// before queuing.
+ return_work: Option<Ref<ThreadError>>,
+
+ /// Determines whether the work list below should be processed. When set to false, `work_list`
+ /// is treated as if it were empty.
+ process_work_list: bool,
+ work_list: List<DeliverToReadListAdapter>,
+ current_transaction: Option<Ref<Transaction>>,
+}
+
+impl InnerThread {
+ fn new() -> Self {
+ Self {
+ looper_flags: 0,
+ is_dead: false,
+ process_work_list: false,
+ work_list: List::new(),
+ current_transaction: None,
+ return_work: None,
+ reply_work: None,
+ }
+ }
+
+ fn set_reply_work(&mut self, reply_work: Ref<ThreadError>) {
+ self.reply_work = Some(reply_work);
+ }
+
+ fn push_reply_work(&mut self, code: u32) {
+ let work = self.reply_work.take();
+ self.push_existing_work(work, code);
+ }
+
+ fn set_return_work(&mut self, return_work: Ref<ThreadError>) {
+ self.return_work = Some(return_work);
+ }
+
+ fn push_return_work(&mut self, code: u32) {
+ let work = self.return_work.take();
+ self.push_existing_work(work, code);
+ }
+
+ fn push_existing_work(&mut self, owork: Option<Ref<ThreadError>>, code: u32) {
+ // TODO: Write some warning when the following fails. It should not happen, and
+ // if it does, there is likely something wrong.
+ if let Some(work) = owork {
+ // `error_code` is written to with relaxed semantics because the queue onto which it is
+ // being inserted is protected by a lock. The release barrier when the lock is released
+ // by the caller matches with the acquire barrier of the future reader to guarantee
+ // that `error_code` is visible.
+ work.error_code.store(code, Ordering::Relaxed);
+ self.push_work(work);
+ }
+ }
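The ordering argument in the comment above relies on the lock's release/acquire pair rather than on the atomic itself. A minimal user-space sketch of the same pattern, using `std` types purely for illustration (not part of the patch):

    use std::sync::atomic::{AtomicU32, Ordering};
    use std::sync::{Arc, Mutex};

    struct Work { code: AtomicU32 }

    fn push(queue: &Mutex<Vec<Arc<Work>>>, work: Arc<Work>, code: u32) {
        let mut q = queue.lock().unwrap();
        // Relaxed is enough: releasing the lock publishes the store to the
        // reader, which acquires the same lock before loading `code`.
        work.code.store(code, Ordering::Relaxed);
        q.push(work);
    }

    fn pop(queue: &Mutex<Vec<Arc<Work>>>) -> Option<u32> {
        let mut q = queue.lock().unwrap();
        q.pop().map(|w| w.code.load(Ordering::Relaxed))
    }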
+
+ fn pop_work(&mut self) -> Option<Ref<dyn DeliverToRead>> {
+ if !self.process_work_list {
+ return None;
+ }
+
+ let ret = self.work_list.pop_front();
+ // Once the queue is drained, we stop processing it until a non-deferred item is pushed
+ // again onto it.
+ self.process_work_list = !self.work_list.is_empty();
+ ret
+ }
+
+ fn push_work_deferred(&mut self, work: Ref<dyn DeliverToRead>) {
+ self.work_list.push_back(work);
+ }
+
+ fn push_work(&mut self, work: Ref<dyn DeliverToRead>) {
+ self.push_work_deferred(work);
+ self.process_work_list = true;
+ }
+
+ fn has_work(&self) -> bool {
+ self.process_work_list && !self.work_list.is_empty()
+ }
+
+ /// Fetches the transaction the thread can reply to. If the thread has a pending transaction
+ /// (that it could respond to) but it has also issued a transaction, it must first wait for the
+ /// previously-issued transaction to complete.
+ fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<Ref<Transaction>> {
+ let transaction = self.current_transaction.take().ok_or(EINVAL)?;
+
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ self.current_transaction = Some(transaction);
+ return Err(EINVAL);
+ }
+
+ // Find a new current transaction for this thread.
+ self.current_transaction = transaction.find_from(thread);
+ Ok(transaction)
+ }
+
+ fn pop_transaction_replied(&mut self, transaction: &Ref<Transaction>) -> bool {
+ match self.current_transaction.take() {
+ None => false,
+ Some(old) => {
+ if !Ref::ptr_eq(transaction, &old) {
+ self.current_transaction = Some(old);
+ return false;
+ }
+ self.current_transaction = old.clone_next();
+ true
+ }
+ }
+ }
+
+ fn looper_enter(&mut self) {
+ self.looper_flags |= LOOPER_ENTERED;
+ if self.looper_flags & LOOPER_REGISTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_register(&mut self, valid: bool) {
+ self.looper_flags |= LOOPER_REGISTERED;
+ if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_exit(&mut self) {
+ self.looper_flags |= LOOPER_EXITED;
+ }
+
+ /// Determines whether the thread is part of a pool, i.e., if it is a looper.
+ fn is_looper(&self) -> bool {
+ self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
+ }
+
+ /// Determines whether the thread should attempt to fetch work items from the process queue
+ /// (when its own queue is empty). This is the case when the thread is not part of a transaction
+ /// stack and it is registered as a looper.
+ fn should_use_process_work_queue(&self) -> bool {
+ self.current_transaction.is_none() && self.is_looper()
+ }
+
+ fn poll(&mut self) -> u32 {
+ self.looper_flags |= LOOPER_POLL;
+ if self.has_work() {
+ bindings::POLLIN
+ } else {
+ 0
+ }
+ }
+}
+
+pub(crate) struct Thread {
+ pub(crate) id: i32,
+ pub(crate) process: Ref<Process>,
+ inner: SpinLock<InnerThread>,
+ work_condvar: CondVar,
+ links: Links<Thread>,
+}
+
+impl Thread {
+ pub(crate) fn new(id: i32, process: Ref<Process>) -> Result<Ref<Self>> {
+ let return_work = Ref::try_new(ThreadError::new(InnerThread::set_return_work))?;
+ let reply_work = Ref::try_new(ThreadError::new(InnerThread::set_reply_work))?;
+ let mut thread = Pin::from(UniqueRef::try_new(Self {
+ id,
+ process,
+ // SAFETY: `inner` is initialised in the call to `spinlock_init` below.
+ inner: unsafe { SpinLock::new(InnerThread::new()) },
+ // SAFETY: `work_condvar` is initialised in the call to `condvar_init` below.
+ work_condvar: unsafe { CondVar::new() },
+ links: Links::new(),
+ })?);
+
+ // SAFETY: `inner` is pinned when `thread` is.
+ let inner = unsafe { thread.as_mut().map_unchecked_mut(|t| &mut t.inner) };
+ kernel::spinlock_init!(inner, "Thread::inner");
+
+ // SAFETY: `work_condvar` is pinned when `thread` is.
+ let condvar = unsafe { thread.as_mut().map_unchecked_mut(|t| &mut t.work_condvar) };
+ kernel::condvar_init!(condvar, "Thread::work_condvar");
+
+ {
+ let mut inner = thread.inner.lock();
+ inner.set_reply_work(reply_work);
+ inner.set_return_work(return_work);
+ }
+
+ Ok(thread.into())
+ }
+
+ pub(crate) fn set_current_transaction(&self, transaction: Ref<Transaction>) {
+ self.inner.lock().current_transaction = Some(transaction);
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
+ /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
+ /// signal); otherwise it returns indicating that none is available.
+ fn get_work_local(self: &Ref<Self>, wait: bool) -> Result<Ref<dyn DeliverToRead>> {
+ // Try once if the caller does not want to wait.
+ if !wait {
+ return self.inner.lock().pop_work().ok_or(EAGAIN);
+ }
+
+ // Loop waiting only on the local queue (i.e., not registering with the process queue).
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(work);
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ return Err(ERESTARTSYS);
+ }
+ }
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
+ /// queue if none is available locally.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain. If it
+ /// is, the local version (`get_work_local`) should be used instead.
+ fn get_work(self: &Ref<Self>, wait: bool) -> Result<Ref<dyn DeliverToRead>> {
+ // Try to get work from the thread's work queue, using only a local lock.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(work) = inner.pop_work() {
+ return Ok(work);
+ }
+ }
+
+ // If the caller doesn't want to wait, try to grab work from the process queue.
+ //
+ // We know nothing will have been queued directly to the thread queue because it is not in
+ // a transaction and it is not in the process' ready list.
+ if !wait {
+ return self.process.get_work().ok_or(EAGAIN);
+ }
+
+ // Get work from the process queue. If none is available, atomically register as ready.
+ let reg = match self.process.get_work_or_register(self) {
+ Either::Left(work) => return Ok(work),
+ Either::Right(reg) => reg,
+ };
+
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(work);
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ // A signal is pending. We need to pull the thread off the list, then check the
+ // state again after it's off the list to ensure that something was not queued in
+ // the meantime. If something has been queued, we just return it (instead of the
+ // error).
+ drop(inner);
+ drop(reg);
+ return self.inner.lock().pop_work().ok_or(ERESTARTSYS);
+ }
+ }
+ }
+
+ pub(crate) fn push_work(&self, work: Ref<dyn DeliverToRead>) -> BinderResult {
+ {
+ let mut inner = self.inner.lock();
+ if inner.is_dead {
+ return Err(BinderError::new_dead());
+ }
+ inner.push_work(work);
+ }
+ self.work_condvar.notify_one();
+ Ok(())
+ }
+
+ /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
+ /// part of a thread pool) and is alive. Otherwise, pushes the work item to the process instead.
+ pub(crate) fn push_work_if_looper(&self, work: Ref<dyn DeliverToRead>) -> BinderResult {
+ let mut inner = self.inner.lock();
+ if inner.is_looper() && !inner.is_dead {
+ inner.push_work(work);
+ Ok(())
+ } else {
+ drop(inner);
+ self.process.push_work(work)
+ }
+ }
+
+ pub(crate) fn push_work_deferred(&self, work: Ref<dyn DeliverToRead>) {
+ self.inner.lock().push_work_deferred(work);
+ }
+
+ fn translate_object(
+ &self,
+ index_offset: usize,
+ view: &mut AllocationView<'_, '_>,
+ allow_fds: bool,
+ ) -> BinderResult {
+ let offset = view.alloc.read(index_offset)?;
+ let header = view.read::<bindings::binder_object_header>(offset)?;
+ // TODO: Handle other types.
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ view.transfer_binder_object(offset, strong, |obj| {
+ // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+ // representation.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+ let cookie = obj.cookie as _;
+ let flags = obj.flags as _;
+ let node = self.process.as_ref_borrow().get_node(
+ ptr,
+ cookie,
+ flags,
+ strong,
+ Some(self),
+ )?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ Ok(node)
+ })?;
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ view.transfer_binder_object(offset, strong, |obj| {
+ // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ let node = self.process.get_node_from_handle(handle, strong)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ Ok(node)
+ })?;
+ }
+ BINDER_TYPE_FD => {
+ if !allow_fds {
+ return Err(BinderError::new_failed());
+ }
+
+ let obj = view.read::<bindings::binder_fd_object>(offset)?;
+ // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
+ let fd = unsafe { obj.__bindgen_anon_1.fd };
+ let file = File::from_fd(fd)?;
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+ let field_offset =
+ kernel::offset_of!(bindings::binder_fd_object, __bindgen_anon_1.fd) as usize;
+ let file_info = Box::try_new(FileInfo::new(file, offset + field_offset))?;
+ view.alloc.add_file_info(file_info);
+ }
+ _ => pr_warn!("Unsupported binder object type: {:x}\n", header.type_),
+ }
+ Ok(())
+ }
+
+ fn translate_objects(
+ &self,
+ alloc: &mut Allocation<'_>,
+ start: usize,
+ end: usize,
+ allow_fds: bool,
+ ) -> BinderResult {
+ let mut view = AllocationView::new(alloc, start);
+ for i in (start..end).step_by(size_of::<usize>()) {
+ if let Err(err) = self.translate_object(i, &mut view, allow_fds) {
+ alloc.set_info(AllocationInfo { offsets: start..i });
+ return Err(err);
+ }
+ }
+ alloc.set_info(AllocationInfo {
+ offsets: start..end,
+ });
+ Ok(())
+ }
+
+ pub(crate) fn copy_transaction_data<'a>(
+ &self,
+ to_process: &'a Process,
+ tr: &BinderTransactionData,
+ allow_fds: bool,
+ ) -> BinderResult<Allocation<'a>> {
+ let data_size = tr.data_size as _;
+ let adata_size = ptr_align(data_size);
+ let offsets_size = tr.offsets_size as _;
+ let aoffsets_size = ptr_align(offsets_size);
+
+ // This guarantees that at least `sizeof(usize)` bytes will be allocated.
+ let len = core::cmp::max(
+ adata_size.checked_add(aoffsets_size).ok_or(ENOMEM)?,
+ size_of::<usize>(),
+ );
+ let mut alloc = to_process.buffer_alloc(len)?;
+
+ // Copy raw data.
+ let mut reader = unsafe { UserSlicePtr::new(tr.data.ptr.buffer as _, data_size) }.reader();
+ alloc.copy_into(&mut reader, 0, data_size)?;
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+ let mut reader =
+ unsafe { UserSlicePtr::new(tr.data.ptr.offsets as _, offsets_size) }.reader();
+ alloc.copy_into(&mut reader, adata_size, offsets_size)?;
+
+ // Traverse the objects specified.
+ self.translate_objects(
+ &mut alloc,
+ adata_size,
+ adata_size + aoffsets_size,
+ allow_fds,
+ )?;
+ }
+
+ Ok(alloc)
+ }
+
+ fn unwind_transaction_stack(self: &Ref<Self>) {
+ let mut thread = self.clone();
+ while let Ok(transaction) = {
+ let mut inner = thread.inner.lock();
+ inner.pop_transaction_to_reply(thread.as_ref())
+ } {
+ let reply = Either::Right(BR_DEAD_REPLY);
+ if !transaction.from.deliver_single_reply(reply, &transaction) {
+ break;
+ }
+
+ thread = transaction.from.clone();
+ }
+ }
+
+ pub(crate) fn deliver_reply(
+ &self,
+ reply: Either<Ref<Transaction>, u32>,
+ transaction: &Ref<Transaction>,
+ ) {
+ if self.deliver_single_reply(reply, transaction) {
+ transaction.from.unwind_transaction_stack();
+ }
+ }
+
+ /// Delivers a reply to the thread that started a transaction. The reply can either be a
+ /// reply-transaction or an error code to be delivered instead.
+ ///
+ /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
+ /// transaction stack by completing transactions for threads that are dead.
+ fn deliver_single_reply(
+ &self,
+ reply: Either<Ref<Transaction>, u32>,
+ transaction: &Ref<Transaction>,
+ ) -> bool {
+ {
+ let mut inner = self.inner.lock();
+ if !inner.pop_transaction_replied(transaction) {
+ return false;
+ }
+
+ if inner.is_dead {
+ return true;
+ }
+
+ match reply {
+ Either::Left(work) => inner.push_work(work),
+ Either::Right(code) => inner.push_reply_work(code),
+ }
+ }
+
+ // Notify the thread now that we've released the inner lock.
+ self.work_condvar.notify_one();
+ false
+ }
+
+ /// Determines if the given transaction is the current transaction for this thread.
+ fn is_current_transaction(&self, transaction: &Ref<Transaction>) -> bool {
+ let inner = self.inner.lock();
+ match &inner.current_transaction {
+ None => false,
+ Some(current) => Ref::ptr_eq(current, transaction),
+ }
+ }
+
+ fn transaction<T>(self: &Ref<Self>, tr: &BinderTransactionData, inner: T)
+ where
+ T: FnOnce(&Ref<Self>, &BinderTransactionData) -> BinderResult,
+ {
+ if let Err(err) = inner(self, tr) {
+ self.inner.lock().push_return_work(err.reply);
+ }
+ }
+
+ fn reply_inner(self: &Ref<Self>, tr: &BinderTransactionData) -> BinderResult {
+ let orig = self.inner.lock().pop_transaction_to_reply(self)?;
+ if !orig.from.is_current_transaction(&orig) {
+ return Err(BinderError::new_failed());
+ }
+
+ // We need to complete the transaction even if we cannot complete building the reply.
+ (|| -> BinderResult<_> {
+ let completion = Ref::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let process = orig.from.process.clone();
+ let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
+ let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
+ self.inner.lock().push_work(completion);
+ orig.from.deliver_reply(Either::Left(reply), &orig);
+ Ok(())
+ })()
+ .map_err(|mut err| {
+ // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
+ // the sender know that the transaction has completed (with an error in this case).
+ let reply = Either::Right(BR_FAILED_REPLY);
+ orig.from.deliver_reply(reply, &orig);
+ err.reply = BR_TRANSACTION_COMPLETE;
+ err
+ })
+ }
+
+ /// Determines the current top of the transaction stack. It fails if the top is in another
+ /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
+ /// [`None`] if the thread is not currently participating in a transaction stack.
+ fn top_of_transaction_stack(&self) -> Result<Option<Ref<Transaction>>> {
+ let inner = self.inner.lock();
+ Ok(if let Some(cur) = &inner.current_transaction {
+ if core::ptr::eq(self, cur.from.as_ref()) {
+ return Err(EINVAL);
+ }
+ Some(cur.clone())
+ } else {
+ None
+ })
+ }
+
+ fn oneway_transaction_inner(self: &Ref<Self>, tr: &BinderTransactionData) -> BinderResult {
+ let handle = unsafe { tr.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ let completion = Ref::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let transaction = Transaction::new(node_ref, None, self, tr)?;
+ self.inner.lock().push_work(completion);
+ // TODO: Remove the completion on error?
+ transaction.submit()?;
+ Ok(())
+ }
+
+ fn transaction_inner(self: &Ref<Self>, tr: &BinderTransactionData) -> BinderResult {
+ let handle = unsafe { tr.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
+ // could this happen?
+ let top = self.top_of_transaction_stack()?;
+ let completion = Ref::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let transaction = Transaction::new(node_ref, top, self, tr)?;
+
+ // Check that the transaction stack hasn't changed while the lock was released, then update
+ // it with the new transaction.
+ {
+ let mut inner = self.inner.lock();
+ if !transaction.is_stacked_on(&inner.current_transaction) {
+ return Err(BinderError::new_failed());
+ }
+ inner.current_transaction = Some(transaction.clone());
+ }
+
+ // We push the completion as a deferred work so that we wait for the reply before returning
+ // to userland.
+ self.push_work_deferred(completion);
+ // TODO: Remove completion if submission fails?
+ transaction.submit()?;
+ Ok(())
+ }
+
+ fn write(self: &Ref<Self>, req: &mut BinderWriteRead) -> Result {
+ let write_start = req.write_buffer.wrapping_add(req.write_consumed);
+ let write_len = req.write_size - req.write_consumed;
+ let mut reader = unsafe { UserSlicePtr::new(write_start as _, write_len as _).reader() };
+
+ while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_some() {
+ let before = reader.len();
+ match reader.read::<u32>()? {
+ BC_TRANSACTION => {
+ let tr = reader.read::<BinderTransactionData>()?;
+ if tr.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner)
+ } else {
+ self.transaction(&tr, Self::transaction_inner)
+ }
+ }
+ BC_REPLY => self.transaction(&reader.read()?, Self::reply_inner),
+ BC_FREE_BUFFER => drop(self.process.buffer_get(reader.read()?)),
+ BC_INCREFS => self.process.update_ref(reader.read()?, true, false)?,
+ BC_ACQUIRE => self.process.update_ref(reader.read()?, true, true)?,
+ BC_RELEASE => self.process.update_ref(reader.read()?, false, true)?,
+ BC_DECREFS => self.process.update_ref(reader.read()?, false, false)?,
+ BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
+ BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+ BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+ BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+ BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
+ BC_REGISTER_LOOPER => {
+ let valid = self.process.register_thread();
+ self.inner.lock().looper_register(valid);
+ }
+ BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
+ BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
+
+ // TODO: Add support for BC_TRANSACTION_SG and BC_REPLY_SG.
+ // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
+ _ => return Err(EINVAL),
+ }
+
+ // Update the number of write bytes consumed.
+ req.write_consumed += (before - reader.len()) as u64;
+ }
+ Ok(())
+ }
+
+ fn read(self: &Ref<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
+ let read_start = req.read_buffer.wrapping_add(req.read_consumed);
+ let read_len = req.read_size - req.read_consumed;
+ let mut writer = unsafe { UserSlicePtr::new(read_start as _, read_len as _) }.writer();
+ let (in_pool, getter) = {
+ let inner = self.inner.lock();
+ (
+ inner.is_looper(),
+ if inner.should_use_process_work_queue() {
+ Self::get_work
+ } else {
+ Self::get_work_local
+ },
+ )
+ };
+
+ // Reserve some room at the beginning of the read buffer so that we can send a
+ // BR_SPAWN_LOOPER if we need to.
+ if req.read_consumed == 0 {
+ writer.write(&BR_NOOP)?;
+ }
+
+ // Loop doing work while there is room in the buffer.
+ let initial_len = writer.len();
+ while writer.len() >= size_of::<u32>() {
+ match getter(self, wait && initial_len == writer.len()) {
+ Ok(work) => {
+ if !work.do_work(self, &mut writer)? {
+ break;
+ }
+ }
+ Err(err) => {
+ // Propagate the error if we haven't written anything else.
+ if initial_len == writer.len() {
+ return Err(err);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ req.read_consumed += read_len - writer.len() as u64;
+
+ // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
+ if in_pool && self.process.needs_thread() {
+ let mut writer =
+ unsafe { UserSlicePtr::new(req.read_buffer as _, req.read_size as _) }.writer();
+ writer.write(&BR_SPAWN_LOOPER)?;
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn write_read(self: &Ref<Self>, data: UserSlicePtr, wait: bool) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut req = reader.read::<BinderWriteRead>()?;
+
+ // TODO: `write(&req)` happens in all exit paths from here on. Find a better way to encode
+ // it.
+
+ // Go through the write buffer.
+ if req.write_size > 0 {
+ if let Err(err) = self.write(&mut req) {
+ req.read_consumed = 0;
+ writer.write(&req)?;
+ return Err(err);
+ }
+ }
+
+ // Go through the work queue.
+ let mut ret = Ok(());
+ if req.read_size > 0 {
+ ret = self.read(&mut req, wait);
+ }
+
+ // Write the request back so that the consumed fields are visible to the caller.
+ writer.write(&req)?;
+ ret
+ }
+
+ pub(crate) fn poll(&self, file: &File, table: &PollTable) -> (bool, u32) {
+ // SAFETY: `free_waiters` is called on release.
+ unsafe { table.register_wait(file, &self.work_condvar) };
+ let mut inner = self.inner.lock();
+ (inner.should_use_process_work_queue(), inner.poll())
+ }
+
+ pub(crate) fn notify_if_poll_ready(&self) {
+ // Determine if we need to notify. This requires the lock.
+ let inner = self.inner.lock();
+ let notify = inner.looper_flags & LOOPER_POLL != 0
+ && inner.should_use_process_work_queue()
+ && !inner.has_work();
+ drop(inner);
+
+ // Now that the lock is no longer held, notify the waiters if we have to.
+ if notify {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ pub(crate) fn push_return_work(&self, code: u32) {
+ self.inner.lock().push_return_work(code)
+ }
+
+ pub(crate) fn release(self: &Ref<Self>) {
+ // Mark the thread as dead.
+ self.inner.lock().is_dead = true;
+
+ // Cancel all pending work items.
+ while let Ok(work) = self.get_work_local(false) {
+ work.cancel();
+ }
+
+ // Complete the transaction stack as far as we can.
+ self.unwind_transaction_stack();
+
+ // Remove epoll items if polling was ever used on the thread.
+ let poller = self.inner.lock().looper_flags & LOOPER_POLL != 0;
+ if poller {
+ self.work_condvar.free_waiters();
+
+ unsafe { bindings::synchronize_rcu() };
+ }
+ }
+}
+
+impl GetLinks for Thread {
+ type EntryType = Thread;
+ fn get_links(data: &Thread) -> &Links<Thread> {
+ &data.links
+ }
+}
+
+struct ThreadError {
+ error_code: AtomicU32,
+ return_fn: fn(&mut InnerThread, Ref<ThreadError>),
+ links: Links<dyn DeliverToRead>,
+}
+
+impl ThreadError {
+ fn new(return_fn: fn(&mut InnerThread, Ref<ThreadError>)) -> Self {
+ Self {
+ error_code: AtomicU32::new(BR_OK),
+ return_fn,
+ links: Links::new(),
+ }
+ }
+}
+
+impl DeliverToRead for ThreadError {
+ fn do_work(self: Ref<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ // See `InnerThread::push_existing_work` for the reason why `error_code` is up to date even
+ // though we use relaxed semantics.
+ let code = self.error_code.load(Ordering::Relaxed);
+
+ // Return the `ThreadError` to the thread.
+ (self.return_fn)(&mut *thread.inner.lock(), self);
+
+ // Deliver the error code to userspace.
+ writer.write(&code)?;
+ Ok(true)
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.links
+ }
+}
diff --git a/drivers/android/transaction.rs b/drivers/android/transaction.rs
new file mode 100644
index 000000000000..6d8510e3f889
--- /dev/null
+++ b/drivers/android/transaction.rs
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use kernel::{
+ bindings,
+ file::{File, FileDescriptorReservation},
+ io_buffer::IoBufferWriter,
+ linked_list::List,
+ linked_list::{GetLinks, Links},
+ prelude::*,
+ sync::{Ref, SpinLock, UniqueRef},
+ user_ptr::UserSlicePtrWriter,
+ Either, ScopeGuard,
+};
+
+use crate::{
+ defs::*,
+ node::NodeRef,
+ process::Process,
+ ptr_align,
+ thread::{BinderResult, Thread},
+ DeliverToRead,
+};
+
+struct TransactionInner {
+ file_list: List<Box<FileInfo>>,
+}
+
+pub(crate) struct Transaction {
+ inner: SpinLock<TransactionInner>,
+ // TODO: Node should be released when the buffer is released.
+ node_ref: Option<NodeRef>,
+ stack_next: Option<Ref<Transaction>>,
+ pub(crate) from: Ref<Thread>,
+ to: Ref<Process>,
+ free_allocation: AtomicBool,
+ code: u32,
+ pub(crate) flags: u32,
+ data_size: usize,
+ offsets_size: usize,
+ data_address: usize,
+ links: Links<dyn DeliverToRead>,
+}
+
+impl Transaction {
+ pub(crate) fn new(
+ node_ref: NodeRef,
+ stack_next: Option<Ref<Transaction>>,
+ from: &Ref<Thread>,
+ tr: &BinderTransactionData,
+ ) -> BinderResult<Ref<Self>> {
+ let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
+ let to = node_ref.node.owner.clone();
+ let mut alloc = from.copy_transaction_data(&to, tr, allow_fds)?;
+ let data_address = alloc.ptr;
+ let file_list = alloc.take_file_list();
+ alloc.keep_alive();
+ let mut tr = Pin::from(UniqueRef::try_new(Self {
+ // SAFETY: `spinlock_init` is called below.
+ inner: unsafe { SpinLock::new(TransactionInner { file_list }) },
+ node_ref: Some(node_ref),
+ stack_next,
+ from: from.clone(),
+ to,
+ code: tr.code,
+ flags: tr.flags,
+ data_size: tr.data_size as _,
+ data_address,
+ offsets_size: tr.offsets_size as _,
+ links: Links::new(),
+ free_allocation: AtomicBool::new(true),
+ })?);
+
+ // SAFETY: `inner` is pinned when `tr` is.
+ let pinned = unsafe { tr.as_mut().map_unchecked_mut(|t| &mut t.inner) };
+ kernel::spinlock_init!(pinned, "Transaction::inner");
+
+ Ok(tr.into())
+ }
+
+ pub(crate) fn new_reply(
+ from: &Ref<Thread>,
+ to: Ref<Process>,
+ tr: &BinderTransactionData,
+ allow_fds: bool,
+ ) -> BinderResult<Ref<Self>> {
+ let mut alloc = from.copy_transaction_data(&to, tr, allow_fds)?;
+ let data_address = alloc.ptr;
+ let file_list = alloc.take_file_list();
+ alloc.keep_alive();
+ let mut tr = Pin::from(UniqueRef::try_new(Self {
+ // SAFETY: `spinlock_init` is called below.
+ inner: unsafe { SpinLock::new(TransactionInner { file_list }) },
+ node_ref: None,
+ stack_next: None,
+ from: from.clone(),
+ to,
+ code: tr.code,
+ flags: tr.flags,
+ data_size: tr.data_size as _,
+ data_address,
+ offsets_size: tr.offsets_size as _,
+ links: Links::new(),
+ free_allocation: AtomicBool::new(true),
+ })?);
+
+ // SAFETY: `inner` is pinned when `tr` is.
+ let pinned = unsafe { tr.as_mut().map_unchecked_mut(|t| &mut t.inner) };
+ kernel::spinlock_init!(pinned, "Transaction::inner");
+
+ Ok(tr.into())
+ }
+
+ /// Determines if the transaction is stacked on top of the given transaction.
+ pub(crate) fn is_stacked_on(&self, onext: &Option<Ref<Self>>) -> bool {
+ match (&self.stack_next, onext) {
+ (None, None) => true,
+ (Some(stack_next), Some(next)) => Ref::ptr_eq(stack_next, next),
+ _ => false,
+ }
+ }
+
+ /// Returns a pointer to the next transaction on the transaction stack, if there is one.
+ pub(crate) fn clone_next(&self) -> Option<Ref<Self>> {
+ let next = self.stack_next.as_ref()?;
+ Some(next.clone())
+ }
+
+ /// Searches in the transaction stack for a thread that belongs to the target process. This is
+ /// useful when finding a target for a new transaction: if the node belongs to a process that
+ /// is already part of the transaction stack, we reuse the thread.
+ fn find_target_thread(&self) -> Option<Ref<Thread>> {
+ let process = &self.node_ref.as_ref()?.node.owner;
+
+ let mut it = &self.stack_next;
+ while let Some(transaction) = it {
+ if Ref::ptr_eq(&transaction.from.process, process) {
+ return Some(transaction.from.clone());
+ }
+ it = &transaction.stack_next;
+ }
+ None
+ }
+
+ /// Searches in the transaction stack for a transaction originating at the given thread.
+ pub(crate) fn find_from(&self, thread: &Thread) -> Option<Ref<Transaction>> {
+ let mut it = &self.stack_next;
+ while let Some(transaction) = it {
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ return Some(transaction.clone());
+ }
+
+ it = &transaction.stack_next;
+ }
+ None
+ }
+
+ /// Submits the transaction to a work queue. Use a thread if there is one in the transaction
+ /// stack, otherwise use the destination process.
+ pub(crate) fn submit(self: Ref<Self>) -> BinderResult {
+ if let Some(thread) = self.find_target_thread() {
+ thread.push_work(self)
+ } else {
+ let process = self.to.clone();
+ process.push_work(self)
+ }
+ }
+
+ /// Prepares the file list for delivery to the caller.
+ fn prepare_file_list(&self) -> Result<List<Box<FileInfo>>> {
+ // Get list of files that are being transferred as part of the transaction.
+ let mut file_list = core::mem::replace(&mut self.inner.lock().file_list, List::new());
+
+ // If the list is non-empty, prepare the buffer.
+ if !file_list.is_empty() {
+ let alloc = self.to.buffer_get(self.data_address).ok_or(ESRCH)?;
+ let cleanup = ScopeGuard::new(|| {
+ self.free_allocation.store(false, Ordering::Relaxed);
+ });
+
+ let mut it = file_list.cursor_front_mut();
+ while let Some(file_info) = it.current() {
+ let reservation = FileDescriptorReservation::new(bindings::O_CLOEXEC)?;
+ alloc.write(file_info.buffer_offset, &reservation.reserved_fd())?;
+ file_info.reservation = Some(reservation);
+ it.move_next();
+ }
+
+ alloc.keep_alive();
+ cleanup.dismiss();
+ }
+
+ Ok(file_list)
+ }
+}
+
+impl DeliverToRead for Transaction {
+ fn do_work(self: Ref<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
+ // TODO: Initialise the following fields from `tr`:
+ // - `pub sender_pid: pid_t`.
+ // - `pub sender_euid: uid_t`.
+
+ let send_failed_reply = ScopeGuard::new(|| {
+ if self.node_ref.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Either::Right(BR_FAILED_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+ });
+ let mut file_list = if let Ok(list) = self.prepare_file_list() {
+ list
+ } else {
+ // On failure to process the list, we send a reply back to the sender and ignore the
+ // transaction on the recipient.
+ return Ok(true);
+ };
+
+ let mut tr = BinderTransactionData::default();
+
+ if let Some(nref) = &self.node_ref {
+ let (ptr, cookie) = nref.node.get_id();
+ tr.target.ptr = ptr as _;
+ tr.cookie = cookie as _;
+ };
+
+ tr.code = self.code;
+ tr.flags = self.flags;
+ tr.data_size = self.data_size as _;
+ tr.data.ptr.buffer = self.data_address as _;
+ tr.offsets_size = self.offsets_size as _;
+ if tr.offsets_size > 0 {
+ tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size)) as _;
+ }
+
+ let code = if self.node_ref.is_none() {
+ BR_REPLY
+ } else {
+ BR_TRANSACTION
+ };
+
+ // Write the transaction code and data to the user buffer.
+ writer.write(&code)?;
+ writer.write(&tr)?;
+
+ // Dismiss the guard that would complete the transaction with a failure. No failure paths
+ // are allowed from here on out.
+ send_failed_reply.dismiss();
+
+ // Commit all files.
+ {
+ let mut it = file_list.cursor_front_mut();
+ while let Some(file_info) = it.current() {
+ if let Some(reservation) = file_info.reservation.take() {
+ if let Some(file) = file_info.file.take() {
+ reservation.commit(file);
+ }
+ }
+
+ it.move_next();
+ }
+ }
+
+ // When `drop` is called, we don't want the allocation to be freed because it is now the
+ // user's responsibility to free it.
+ //
+ // `drop` is guaranteed to see this relaxed store because `Ref` guarantees that everything
+ // that happens when an object is referenced happens-before the eventual `drop`.
+ self.free_allocation.store(false, Ordering::Relaxed);
+
+ // When this is not a reply and not an async transaction, update `current_transaction`. If
+ // it's a reply, `current_transaction` has already been updated appropriately.
+ if self.node_ref.is_some() && tr.flags & TF_ONE_WAY == 0 {
+ thread.set_current_transaction(self);
+ }
+
+ Ok(false)
+ }
+
+ fn cancel(self: Ref<Self>) {
+ let reply = Either::Right(BR_DEAD_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+
+ fn get_links(&self) -> &Links<dyn DeliverToRead> {
+ &self.links
+ }
+}
+
+impl Drop for Transaction {
+ fn drop(&mut self) {
+ if self.free_allocation.load(Ordering::Relaxed) {
+ self.to.buffer_get(self.data_address);
+ }
+ }
+}
+
+pub(crate) struct FileInfo {
+ links: Links<FileInfo>,
+
+ /// The file for which a descriptor will be created in the recipient process.
+ file: Option<ARef<File>>,
+
+ /// The file descriptor reservation on the recipient process.
+ reservation: Option<FileDescriptorReservation>,
+
+ /// The offset in the buffer where the file descriptor is stored.
+ buffer_offset: usize,
+}
+
+impl FileInfo {
+ pub(crate) fn new(file: ARef<File>, buffer_offset: usize) -> Self {
+ Self {
+ file: Some(file),
+ reservation: None,
+ buffer_offset,
+ links: Links::new(),
+ }
+ }
+}
+
+impl GetLinks for FileInfo {
+ type EntryType = Self;
+
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
+ &data.links
+ }
+}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 0642f579196f..0499c2facbb0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -482,6 +482,14 @@ config GPIO_PL061
help
Say yes here to support the PrimeCell PL061 GPIO device.
+config GPIO_PL061_RUST
+ tristate "PrimeCell PL061 GPIO support written in Rust"
+ depends on ARM_AMBA && RUST
+ select IRQ_DOMAIN
+ select GPIOLIB_IRQCHIP
+ help
+ Say yes here to support the PrimeCell PL061 GPIO device.
+
config GPIO_PMIC_EIC_SPRD
tristate "Spreadtrum PMIC EIC support"
depends on MFD_SC27XX_PMIC || COMPILE_TEST
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index a0985d30f51b..3fa4f3f93d85 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -119,6 +119,7 @@ obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
+obj-$(CONFIG_GPIO_PL061_RUST) += gpio_pl061_rust.o
obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o
diff --git a/drivers/gpio/gpio_pl061_rust.rs b/drivers/gpio/gpio_pl061_rust.rs
new file mode 100644
index 000000000000..d417fa3b0abc
--- /dev/null
+++ b/drivers/gpio/gpio_pl061_rust.rs
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061).
+//!
+//! Based on the C driver written by Baruch Siach <baruch@tkos.co.il>.
+
+use kernel::{
+ amba, bit, bits_iter, define_amba_id_table, device, gpio,
+ io_mem::IoMem,
+ irq::{self, ExtraResult, IrqData, LockedIrqData},
+ power,
+ prelude::*,
+ sync::{RawSpinLock, Ref, RefBorrow},
+};
+
+const GPIODIR: usize = 0x400;
+const GPIOIS: usize = 0x404;
+const GPIOIBE: usize = 0x408;
+const GPIOIEV: usize = 0x40C;
+const GPIOIE: usize = 0x410;
+const GPIOMIS: usize = 0x418;
+const GPIOIC: usize = 0x41C;
+const GPIO_SIZE: usize = 0x1000;
+
+const PL061_GPIO_NR: u16 = 8;
+
+#[derive(Default)]
+struct ContextSaveRegs {
+ gpio_data: u8,
+ gpio_dir: u8,
+ gpio_is: u8,
+ gpio_ibe: u8,
+ gpio_iev: u8,
+ gpio_ie: u8,
+}
+
+#[derive(Default)]
+struct PL061DataInner {
+ csave_regs: ContextSaveRegs,
+}
+
+struct PL061Data {
+ dev: device::Device,
+ inner: RawSpinLock<PL061DataInner>,
+}
+
+struct PL061Resources {
+ base: IoMem<GPIO_SIZE>,
+ parent_irq: u32,
+}
+
+type PL061Registrations = gpio::RegistrationWithIrqChip<PL061Device>;
+
+type DeviceData = device::Data<PL061Registrations, PL061Resources, PL061Data>;
+
+struct PL061Device;
+
+#[vtable]
+impl gpio::Chip for PL061Device {
+ type Data = Ref<DeviceData>;
+
+ fn get_direction(data: RefBorrow<'_, DeviceData>, offset: u32) -> Result<gpio::LineDirection> {
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ Ok(if pl061.base.readb(GPIODIR) & bit(offset) != 0 {
+ gpio::LineDirection::Out
+ } else {
+ gpio::LineDirection::In
+ })
+ }
+
+ fn direction_input(data: RefBorrow<'_, DeviceData>, offset: u32) -> Result {
+ let _guard = data.inner.lock_irqdisable();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ let mut gpiodir = pl061.base.readb(GPIODIR);
+ gpiodir &= !bit(offset);
+ pl061.base.writeb(gpiodir, GPIODIR);
+ Ok(())
+ }
+
+ fn direction_output(data: RefBorrow<'_, DeviceData>, offset: u32, value: bool) -> Result {
+ let woffset = bit(offset + 2).into();
+ let _guard = data.inner.lock_irqdisable();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ pl061.base.try_writeb((value as u8) << offset, woffset)?;
+ let mut gpiodir = pl061.base.readb(GPIODIR);
+ gpiodir |= bit(offset);
+ pl061.base.writeb(gpiodir, GPIODIR);
+
+ // The GPIO value is set again because the PL061 doesn't allow setting the value of a
+ // GPIO pin before configuring it in OUT mode.
+ pl061.base.try_writeb((value as u8) << offset, woffset)?;
+ Ok(())
+ }
+
+ fn get(data: RefBorrow<'_, DeviceData>, offset: u32) -> Result<bool> {
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ Ok(pl061.base.try_readb(bit(offset + 2).into())? != 0)
+ }
+
+ fn set(data: RefBorrow<'_, DeviceData>, offset: u32, value: bool) {
+ if let Some(pl061) = data.resources() {
+ let woffset = bit(offset + 2).into();
+ let _ = pl061.base.try_writeb((value as u8) << offset, woffset);
+ }
+ }
+}
+
+impl gpio::ChipWithIrqChip for PL061Device {
+ fn handle_irq_flow(
+ data: RefBorrow<'_, DeviceData>,
+ desc: &irq::Descriptor,
+ domain: &irq::Domain,
+ ) {
+ let chained = desc.enter_chained();
+
+ if let Some(pl061) = data.resources() {
+ let pending = pl061.base.readb(GPIOMIS);
+ for offset in bits_iter(pending) {
+ domain.generic_handle_chained(offset, &chained);
+ }
+ }
+ }
+}
+
+#[vtable]
+impl irq::Chip for PL061Device {
+ type Data = Ref<DeviceData>;
+
+ fn set_type(
+ data: RefBorrow<'_, DeviceData>,
+ irq_data: &mut LockedIrqData,
+ trigger: u32,
+ ) -> Result<ExtraResult> {
+ let offset = irq_data.hwirq();
+ let bit = bit(offset);
+
+ if offset >= PL061_GPIO_NR.into() {
+ return Err(EINVAL);
+ }
+
+ if trigger & (irq::Type::LEVEL_HIGH | irq::Type::LEVEL_LOW) != 0
+ && trigger & (irq::Type::EDGE_RISING | irq::Type::EDGE_FALLING) != 0
+ {
+ dev_err!(
+ data.dev,
+ "trying to configure line {} for both level and edge detection, choose one!\n",
+ offset
+ );
+ return Err(EINVAL);
+ }
+
+ let _guard = data.inner.lock_irqdisable();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+
+ let mut gpioiev = pl061.base.readb(GPIOIEV);
+ let mut gpiois = pl061.base.readb(GPIOIS);
+ let mut gpioibe = pl061.base.readb(GPIOIBE);
+
+ if trigger & (irq::Type::LEVEL_HIGH | irq::Type::LEVEL_LOW) != 0 {
+ let polarity = trigger & irq::Type::LEVEL_HIGH != 0;
+
+ // Disable edge detection.
+ gpioibe &= !bit;
+ // Enable level detection.
+ gpiois |= bit;
+ // Select polarity.
+ if polarity {
+ gpioiev |= bit;
+ } else {
+ gpioiev &= !bit;
+ }
+ irq_data.set_level_handler();
+ dev_dbg!(
+ data.dev,
+ "line {}: IRQ on {} level\n",
+ offset,
+ if polarity { "HIGH" } else { "LOW" }
+ );
+ } else if (trigger & irq::Type::EDGE_BOTH) == irq::Type::EDGE_BOTH {
+ // Disable level detection.
+ gpiois &= !bit;
+ // Select both edges; setting this makes GPIOIEV be ignored.
+ gpioibe |= bit;
+ irq_data.set_edge_handler();
+ dev_dbg!(data.dev, "line {}: IRQ on both edges\n", offset);
+ } else if trigger & (irq::Type::EDGE_RISING | irq::Type::EDGE_FALLING) != 0 {
+ let rising = trigger & irq::Type::EDGE_RISING != 0;
+
+ // Disable level detection.
+ gpiois &= !bit;
+ // Clear detection on both edges.
+ gpioibe &= !bit;
+ // Select edge.
+ if rising {
+ gpioiev |= bit;
+ } else {
+ gpioiev &= !bit;
+ }
+ irq_data.set_edge_handler();
+ dev_dbg!(
+ data.dev,
+ "line {}: IRQ on {} edge\n",
+ offset,
+ if rising { "RISING" } else { "FALLING}" }
+ );
+ } else {
+ // No trigger: disable everything.
+ gpiois &= !bit;
+ gpioibe &= !bit;
+ gpioiev &= !bit;
+ irq_data.set_bad_handler();
+ dev_warn!(data.dev, "no trigger selected for line {}\n", offset);
+ }
+
+ pl061.base.writeb(gpiois, GPIOIS);
+ pl061.base.writeb(gpioibe, GPIOIBE);
+ pl061.base.writeb(gpioiev, GPIOIEV);
+
+ Ok(ExtraResult::None)
+ }
+
+ fn mask(data: RefBorrow<'_, DeviceData>, irq_data: &IrqData) {
+ let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
+ let _guard = data.inner.lock();
+ if let Some(pl061) = data.resources() {
+ let gpioie = pl061.base.readb(GPIOIE) & !mask;
+ pl061.base.writeb(gpioie, GPIOIE);
+ }
+ }
+
+ fn unmask(data: RefBorrow<'_, DeviceData>, irq_data: &IrqData) {
+ let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
+ let _guard = data.inner.lock();
+ if let Some(pl061) = data.resources() {
+ let gpioie = pl061.base.readb(GPIOIE) | mask;
+ pl061.base.writeb(gpioie, GPIOIE);
+ }
+ }
+
+ // This gets called from the edge IRQ handler to ACK the edge IRQ in the GPIOIC
+ // (interrupt-clear) register. For level IRQs this is not needed: these go away when the level
+ // signal goes away.
+ fn ack(data: RefBorrow<'_, DeviceData>, irq_data: &IrqData) {
+ let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
+ let _guard = data.inner.lock();
+ if let Some(pl061) = data.resources() {
+ pl061.base.writeb(mask.into(), GPIOIC);
+ }
+ }
+
+ fn set_wake(data: RefBorrow<'_, DeviceData>, _irq_data: &IrqData, on: bool) -> Result {
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ irq::set_wake(pl061.parent_irq, on)
+ }
+}
+
+impl amba::Driver for PL061Device {
+ type Data = Ref<DeviceData>;
+ type PowerOps = Self;
+
+ define_amba_id_table! {(), [
+ ({id: 0x00041061, mask: 0x000fffff}, None),
+ ]}
+
+ fn probe(dev: &mut amba::Device, _data: Option<&Self::IdInfo>) -> Result<Ref<DeviceData>> {
+ let res = dev.take_resource().ok_or(ENXIO)?;
+ let irq = dev.irq(0).ok_or(ENXIO)?;
+
+ let mut data = kernel::new_device_data!(
+ gpio::RegistrationWithIrqChip::new(),
+ PL061Resources {
+ // SAFETY: This device doesn't support DMA.
+ base: unsafe { IoMem::try_new(res)? },
+ parent_irq: irq,
+ },
+ PL061Data {
+ dev: device::Device::from_dev(dev),
+ // SAFETY: We call `rawspinlock_init` below.
+ inner: unsafe { RawSpinLock::new(PL061DataInner::default()) },
+ },
+ "PL061::Registrations"
+ )?;
+
+ // SAFETY: General part of the data is pinned when `data` is.
+ let gen_inner = unsafe { data.as_mut().map_unchecked_mut(|d| &mut (**d).inner) };
+ kernel::rawspinlock_init!(gen_inner, "PL061Data::inner");
+
+ let data = Ref::<DeviceData>::from(data);
+
+ data.resources().ok_or(ENXIO)?.base.writeb(0, GPIOIE); // disable irqs
+
+ kernel::gpio_irq_chip_register!(
+ data.registrations().ok_or(ENXIO)?.as_pinned_mut(),
+ Self,
+ PL061_GPIO_NR,
+ None,
+ dev,
+ data.clone(),
+ irq
+ )?;
+
+ dev_info!(data.dev, "PL061 GPIO chip registered\n");
+
+ Ok(data)
+ }
+}
+
+impl power::Operations for PL061Device {
+ type Data = Ref<DeviceData>;
+
+ fn suspend(data: RefBorrow<'_, DeviceData>) -> Result {
+ let mut inner = data.inner.lock();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+ inner.csave_regs.gpio_data = 0;
+ inner.csave_regs.gpio_dir = pl061.base.readb(GPIODIR);
+ inner.csave_regs.gpio_is = pl061.base.readb(GPIOIS);
+ inner.csave_regs.gpio_ibe = pl061.base.readb(GPIOIBE);
+ inner.csave_regs.gpio_iev = pl061.base.readb(GPIOIEV);
+ inner.csave_regs.gpio_ie = pl061.base.readb(GPIOIE);
+
+ for offset in 0..PL061_GPIO_NR {
+ if inner.csave_regs.gpio_dir & bit(offset) != 0 {
+ if let Ok(v) = <Self as gpio::Chip>::get(data, offset.into()) {
+ inner.csave_regs.gpio_data |= (v as u8) << offset;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn resume(data: RefBorrow<'_, DeviceData>) -> Result {
+ let inner = data.inner.lock();
+ let pl061 = data.resources().ok_or(ENXIO)?;
+
+ for offset in 0..PL061_GPIO_NR {
+ if inner.csave_regs.gpio_dir & bit(offset) != 0 {
+ let value = inner.csave_regs.gpio_data & bit(offset) != 0;
+ let _ = <Self as gpio::Chip>::direction_output(data, offset.into(), value);
+ } else {
+ let _ = <Self as gpio::Chip>::direction_input(data, offset.into());
+ }
+ }
+
+ pl061.base.writeb(inner.csave_regs.gpio_is, GPIOIS);
+ pl061.base.writeb(inner.csave_regs.gpio_ibe, GPIOIBE);
+ pl061.base.writeb(inner.csave_regs.gpio_iev, GPIOIEV);
+ pl061.base.writeb(inner.csave_regs.gpio_ie, GPIOIE);
+
+ Ok(())
+ }
+
+ fn freeze(data: RefBorrow<'_, DeviceData>) -> Result {
+ Self::suspend(data)
+ }
+
+ fn restore(data: RefBorrow<'_, DeviceData>) -> Result {
+ Self::resume(data)
+ }
+}
+
+module_amba_driver! {
+ type: PL061Device,
+ name: b"pl061_gpio",
+ author: b"Wedson Almeida Filho",
+ license: b"GPL",
+}
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 4f2a819fd60a..50b3f6b9502e 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -4,8 +4,12 @@
#ifndef __ASSEMBLY__
+/*
+ * Skipped when running bindgen due to a libclang issue;
+ * see https://github.com/rust-lang/rust-bindgen/issues/2244.
+ */
#if defined(CONFIG_DEBUG_INFO_BTF) && defined(CONFIG_PAHOLE_HAS_BTF_TAG) && \
- __has_attribute(btf_type_tag)
+ __has_attribute(btf_type_tag) && !defined(__BINDGEN__)
# define BTF_TYPE_TAG(value) __attribute__((btf_type_tag(#value)))
#else
# define BTF_TYPE_TAG(value) /* nothing */
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index ad39636e0c3f..649faac31ddb 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -15,7 +15,7 @@
#include <asm/sections.h>
-#define KSYM_NAME_LEN 128
+#define KSYM_NAME_LEN 512
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
(KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 5c0c5174155d..27df5380c1e4 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -99,11 +99,17 @@
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key, short inner);
+static inline void _raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+ __raw_spin_lock_init(lock, name, key, LD_WAIT_SPIN);
+}
+
# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
+ _raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
@@ -326,12 +332,17 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
#ifdef CONFIG_DEBUG_SPINLOCK
+static inline void __spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+ __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
+}
+
# define spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
- __raw_spin_lock_init(spinlock_check(lock), \
- #lock, &__key, LD_WAIT_CONFIG); \
+ __spin_lock_init(lock, #lock, &__key); \
} while (0)
#else
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a0143dd24430..b6cc20b56560 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -221,24 +221,31 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
* to generate better code.
*/
#ifdef CONFIG_LOCKDEP
-#define __INIT_WORK(_work, _func, _onstack) \
+#define __INIT_WORK_WITH_KEY(_work, _func, _onstack, _key) \
do { \
- static struct lock_class_key __key; \
- \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
+ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, _key, 0); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
-#else
+
#define __INIT_WORK(_work, _func, _onstack) \
do { \
+ static struct lock_class_key __key; \
+ __INIT_WORK_WITH_KEY(_work, _func, _onstack, &__key); \
+ } while (0)
+#else
+#define __INIT_WORK_WITH_KEY(_work, _func, _onstack, _key) \
+ do { \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
+
+#define __INIT_WORK(_work, _func, _onstack) \
+ __INIT_WORK_WITH_KEY(_work, _func, _onstack, NULL)
#endif
#define INIT_WORK(_work, _func) \
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index e72e4de8f452..0b8ff0850093 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -251,20 +251,22 @@ struct binder_extended_error {
__s32 param;
};
-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
-#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
-#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
-#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
-#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
-#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
-#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
-#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
-#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
-#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
-#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
-#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
-#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)
+enum {
+ BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read),
+ BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64),
+ BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32),
+ BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32),
+ BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32),
+ BINDER_THREAD_EXIT = _IOW('b', 8, __s32),
+ BINDER_VERSION = _IOWR('b', 9, struct binder_version),
+ BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info),
+ BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref),
+ BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object),
+ BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info),
+ BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info),
+ BINDER_ENABLE_ONEWAY_SPAM_DETECTION = _IOW('b', 16, __u32),
+ BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error),
+};
/*
* NOTE: Two special error codes you should check for when calling
diff --git a/init/Kconfig b/init/Kconfig
index 80fe60fa77fb..1dcfc714b1db 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -60,6 +60,23 @@ config LLD_VERSION
default $(ld-version) if LD_IS_LLD
default 0
+config RUST_IS_AVAILABLE
+ # Because some common tools like 'diff' don't preserve file permissions,
+ # 'rust-is-available.sh' in trees managed with such tools may end up
+ # without execute permission. As a temporary workaround, we invoke it
+ # through an explicit interpreter ('/bin/sh'). This will be unneeded once
+ # 'rust-is-available.sh' is merged into the mainline with its execute
+ # permission.
+ def_bool $(success,/bin/sh $(srctree)/scripts/rust-is-available.sh)
+ help
+ This shows whether a suitable Rust toolchain is available (found).
+
+ Please see Documentation/rust/quick-start.rst for instructions on how
+ to satisfy the build requirements of Rust support.
+
+ In particular, the Makefile target 'rustavailable' is useful to check
+ why the Rust toolchain is not being detected.
+
config CC_CAN_LINK
bool
default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag)) if 64BIT
@@ -151,7 +168,8 @@ config WERROR
default COMPILE_TEST
help
A kernel build should not cause any compiler warnings, and this
- enables the '-Werror' flag to enforce that rule by default.
+ enables the '-Werror' (for C) and '-Dwarnings' (for Rust) flags
+ to enforce that rule by default.
However, if you have a new (or very old) compiler with odd and
unusual warnings, or you have some architecture with problems,
@@ -1903,6 +1921,38 @@ config PROFILING
Say Y here to enable the extended profiling support mechanisms used
by profilers.
+config RUST
+ bool "Rust support"
+ depends on HAVE_RUST
+ depends on RUST_IS_AVAILABLE
+ depends on !MODVERSIONS
+ depends on !GCC_PLUGINS
+ depends on !RANDSTRUCT
+ depends on !DEBUG_INFO_BTF
+ select CONSTRUCTORS
+ help
+ Enables Rust support in the kernel.
+
+ This allows other Rust-related options, like drivers written in Rust,
+ to be selected.
+
+ It is also required to be able to load external kernel modules
+ written in Rust.
+
+ See Documentation/rust/ for more information.
+
+ If unsure, say N.
+
+config RUSTC_VERSION_TEXT
+ string
+ depends on RUST
+ default $(shell,command -v $(RUSTC) >/dev/null 2>&1 && $(RUSTC) --version || echo n)
+
+config BINDGEN_VERSION_TEXT
+ string
+ depends on RUST
+ default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version || echo n)
+
#
# Place an empty function call at each tracepoint site. Can be
# dynamically changed for a probe function.
diff --git a/kernel/configs/rust.config b/kernel/configs/rust.config
new file mode 100644
index 000000000000..38a7c5362c9c
--- /dev/null
+++ b/kernel/configs/rust.config
@@ -0,0 +1 @@
+CONFIG_RUST=y
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 79a85834ce9d..671de0351720 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -71,12 +71,20 @@ static unsigned int kallsyms_expand_symbol(unsigned int off,
data = &kallsyms_names[off];
len = *data;
data++;
+ off++;
+
+ /* If MSB is 1, it is a "big" symbol, so needs an additional byte. */
+ if ((len & 0x80) != 0) {
+ len = (len & 0x7F) | (*data << 7);
+ data++;
+ off++;
+ }
/*
* Update the offset to return the offset for the next symbol on
* the compressed stream.
*/
- off += len + 1;
+ off += len;
/*
* For every byte on the compressed symbol data, copy the table
@@ -129,7 +137,7 @@ static char kallsyms_get_symbol_type(unsigned int off)
static unsigned int get_symbol_offset(unsigned long pos)
{
const u8 *name;
- int i;
+ int i, len;
/*
* Use the closest marker we have. We have markers every 256 positions,
@@ -143,8 +151,18 @@ static unsigned int get_symbol_offset(unsigned long pos)
* so we just need to add the len to the current pointer for every
* symbol we wish to skip.
*/
- for (i = 0; i < (pos & 0xFF); i++)
- name = name + (*name) + 1;
+ for (i = 0; i < (pos & 0xFF); i++) {
+ len = *name;
+
+ /*
+ * If MSB is 1, it is a "big" symbol, so we need to look into
+ * the next byte (and skip it, too).
+ */
+ if ((len & 0x80) != 0)
+ len = ((len & 0x7F) | (name[1] << 7)) + 1;
+
+ name = name + len + 1;
+ }
return name - kallsyms_names;
}
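
The kallsyms hunks above extend the length prefix from one byte to an optional two
bytes: if the MSB of the first byte is set, its low 7 bits are combined with the
following byte shifted left by 7. A standalone sketch of the decoding, for
illustration only:

    // Decode the one- or two-byte length prefix used by the hunks above.
    fn decode_len(data: &[u8]) -> (usize, usize) {
        // Returns (symbol data length, number of prefix bytes consumed).
        let first = data[0] as usize;
        if first & 0x80 == 0 {
            (first, 1)
        } else {
            // "Big" symbol: low 7 bits here, upper bits in the next byte.
            ((first & 0x7F) | ((data[1] as usize) << 7), 2)
        }
    }

    fn main() {
        assert_eq!(decode_len(&[0x05]), (5, 1));
        // 0x85 -> low bits 5; 0x02 << 7 = 256; total length 261.
        assert_eq!(decode_len(&[0x85, 0x02]), (261, 2));
    }
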
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index bc475e62279d..ec06ce59d728 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -213,7 +213,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
* we use the smallest/strictest upper bound possible (56, based on
* the current definition of MODULE_NAME_LEN) to prevent overflows.
*/
- BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
+ BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);
relas = (Elf_Rela *) relasec->sh_addr;
/* For each rela in this klp relocation section */
@@ -227,7 +227,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
/* Format: .klp.sym.sym_objname.sym_name,sympos */
cnt = sscanf(strtab + sym->st_name,
- ".klp.sym.%55[^.].%127[^,],%lu",
+ ".klp.sym.%55[^.].%511[^,],%lu",
sym_objname, sym_name, &sympos);
if (cnt != 3) {
pr_err("symbol %s has an incorrectly formatted name\n",
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 35cd8287642a..74d56efc7416 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2688,6 +2688,88 @@ config HYPERV_TESTING
endmenu # "Kernel Testing and Coverage"
+menu "Rust hacking"
+
+config RUST_DEBUG_ASSERTIONS
+ bool "Debug assertions"
+ depends on RUST
+ help
+ Enables rustc's `-Cdebug-assertions` codegen option.
+
+ This flag lets you turn `cfg(debug_assertions)` conditional
+ compilation on or off. This can be used to enable extra debugging
+ code in development but not in production. For example, it controls
+ the behavior of the standard library's `debug_assert!` macro.
+
+ Note that this will apply to all Rust code, including `core`.
+
+ If unsure, say N.
+
+config RUST_OVERFLOW_CHECKS
+ bool "Overflow checks"
+ default y
+ depends on RUST
+ help
+ Enables rustc's `-Coverflow-checks` codegen option.
+
+ This flag allows you to control the behavior of runtime integer
+ overflow. When overflow-checks are enabled, a Rust panic will occur
+ on overflow.
+
+ Note that this will apply to all Rust code, including `core`.
+
+ If unsure, say Y.
+
+choice
+ prompt "Build-time assertions"
+ default RUST_BUILD_ASSERT_DENY
+ depends on RUST
+ help
+   Controls how `build_error!` and `build_assert!` are handled during the build.
+
+   If calls to them exist in the binary, it may indicate a violated invariant
+   or that the optimizer failed to verify the invariant during compilation.
+   You can choose to abort compilation, or to ignore them during the build and
+   let the check be carried out at runtime.
+
+ If optimizations are turned off, you cannot select "Deny".
+
+ If unsure, say "Deny".
+
+config RUST_BUILD_ASSERT_ALLOW
+ bool "Allow"
+ help
+ Unoptimized calls to `build_error!` will be converted to `panic!`
+ and checked at runtime.
+
+config RUST_BUILD_ASSERT_WARN
+ bool "Warn"
+ help
+ Unoptimized calls to `build_error!` will be converted to `panic!`
+ and checked at runtime, but warnings will be generated when building.
+
+config RUST_BUILD_ASSERT_DENY
+ bool "Deny"
+ help
+ Unoptimized calls to `build_error!` will abort compilation.
+
+endchoice
+
+config RUST_KERNEL_KUNIT_TEST
+ bool "KUnit test for the `kernel` crate" if !KUNIT_ALL_TESTS
+ depends on RUST && KUNIT=y
+ default KUNIT_ALL_TESTS
+ help
+ This builds the documentation tests of the `kernel` crate
+ as KUnit tests.
+
+ For more information on KUnit and unit tests in general,
+ please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+endmenu # "Rust"
+
source "Documentation/Kconfig"
endmenu # Kernel hacking
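
The "Build-time assertions" choice above selects what happens to calls to
`build_error!`/`build_assert!` that survive optimization. Below is a self-contained
sketch of the underlying idea only, not the kernel's actual `build_error` crate:
if a call cannot be optimized away, "Deny" turns it into a build failure, while
"Allow" leaves an ordinary runtime panic.

    // Sketch of the mechanism; the real macros live in rust/build_error.rs and
    // rust/kernel/build_assert.rs.
    #[inline(never)]
    fn build_error(msg: &'static str) -> ! {
        // "Allow" behaviour: the check is deferred to runtime. Under "Deny",
        // no definition would be linked in, so a surviving call fails the build.
        panic!("{}", msg)
    }

    macro_rules! build_assert {
        ($cond:expr) => {
            if !$cond {
                build_error(concat!("assertion failed: ", stringify!($cond)));
            }
        };
    }

    fn main() {
        const LEN: usize = 8;
        build_assert!(LEN <= 64); // Provably true, so the call is optimized out.
    }
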
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3c1853a9d1c0..c414a8d9f1ea 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2246,6 +2246,9 @@ int __init no_hash_pointers_enable(char *str)
}
early_param("no_hash_pointers", no_hash_pointers_enable);
+/* Used for Rust formatting ('%pA'). */
+char *rust_fmt_argument(char *buf, char *end, void *ptr);
+
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -2372,6 +2375,10 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
*
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
+ *
+ * There is also a '%pA' format specifier, but it is only intended to be used
+ * from Rust code to format core::fmt::Arguments. Do *not* use it from C.
+ * See rust/kernel/print.rs for details.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@@ -2444,6 +2451,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return device_node_string(buf, end, ptr, spec, fmt + 1);
case 'f':
return fwnode_string(buf, end, ptr, spec, fmt + 1);
+ case 'A':
+ if (!IS_ENABLED(CONFIG_RUST)) {
+ WARN_ONCE(1, "Please remove %%pA from non-Rust code\n");
+ return error_string(buf, end, "(%pA?)", spec);
+ }
+ return rust_fmt_argument(buf, end, ptr);
case 'x':
return pointer_string(buf, end, ptr, spec);
case 'e':
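
The '%pA' case added above hands the argument to `rust_fmt_argument()`, which on the
Rust side renders a `core::fmt::Arguments` into the `buf`/`end` window provided by
vsnprintf(). A rough userspace sketch of that kind of bounded formatting follows;
the kernel's real implementation is in rust/kernel/print.rs and differs in detail.

    use core::fmt::{self, Write};

    // A bounded writer that fills a byte buffer and silently truncates.
    struct BoundedBuf<'a> {
        buf: &'a mut [u8],
        pos: usize,
    }

    impl Write for BoundedBuf<'_> {
        fn write_str(&mut self, s: &str) -> fmt::Result {
            let remaining = self.buf.len() - self.pos;
            let n = s.len().min(remaining);
            self.buf[self.pos..self.pos + n].copy_from_slice(&s.as_bytes()[..n]);
            self.pos += n;
            Ok(())
        }
    }

    fn render(args: fmt::Arguments<'_>, out: &mut [u8]) -> usize {
        let mut w = BoundedBuf { buf: out, pos: 0 };
        let _ = w.write_fmt(args);
        w.pos
    }

    fn main() {
        let mut out = [0u8; 32];
        let n = render(format_args!("pid={} comm={}", 1, "init"), &mut out);
        assert_eq!(&out[..n], b"pid=1 comm=init");
    }
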
diff --git a/rust/.gitignore b/rust/.gitignore
new file mode 100644
index 000000000000..89b602d91109
--- /dev/null
+++ b/rust/.gitignore
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+target.json
+bindings_generated.rs
+bindings_helpers_generated.rs
+exports_*_generated.h
+doctests_kernel_generated.rs
+doctests_kernel_generated_kunit.c
+doc/
+test/
diff --git a/rust/Makefile b/rust/Makefile
new file mode 100644
index 000000000000..30ce13fa8dad
--- /dev/null
+++ b/rust/Makefile
@@ -0,0 +1,415 @@
+# SPDX-License-Identifier: GPL-2.0
+
+always-$(CONFIG_RUST) += target.json
+no-clean-files += target.json
+
+obj-$(CONFIG_RUST) += core.o compiler_builtins.o
+always-$(CONFIG_RUST) += exports_core_generated.h
+
+# Missing prototypes are expected in the helpers since they are exported
+# for Rust only, thus there are no headers nor prototypes for them.
+obj-$(CONFIG_RUST) += helpers.o
+CFLAGS_REMOVE_helpers.o = -Wmissing-prototypes -Wmissing-declarations
+
+always-$(CONFIG_RUST) += libmacros.so
+no-clean-files += libmacros.so
+
+always-$(CONFIG_RUST) += bindings/bindings_generated.rs bindings/bindings_helpers_generated.rs
+obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o
+always-$(CONFIG_RUST) += exports_alloc_generated.h exports_bindings_generated.h \
+ exports_kernel_generated.h
+
+ifdef CONFIG_RUST_BUILD_ASSERT_DENY
+always-$(CONFIG_RUST) += build_error.o
+else
+obj-$(CONFIG_RUST) += build_error.o
+endif
+
+obj-$(CONFIG_RUST) += exports.o
+
+obj-$(CONFIG_RUST_KERNEL_KUNIT_TEST) += doctests_kernel_generated.o
+obj-$(CONFIG_RUST_KERNEL_KUNIT_TEST) += doctests_kernel_generated_kunit.o
+
+# Avoids running `$(RUSTC)` for the sysroot when it may not be available.
+ifdef CONFIG_RUST
+
+# `$(rust_flags)` is passed in case the user added `--sysroot`.
+rustc_sysroot := $(shell $(RUSTC) $(rust_flags) --print sysroot)
+rustc_host_target := $(shell $(RUSTC) --version --verbose | grep -F 'host: ' | cut -d' ' -f2)
+RUST_LIB_SRC ?= $(rustc_sysroot)/lib/rustlib/src/rust/library
+
+ifeq ($(quiet),silent_)
+cargo_quiet=-q
+rust_test_quiet=-q
+rustdoc_test_quiet=--test-args -q
+rustdoc_test_kernel_quiet=>/dev/null
+else ifeq ($(quiet),quiet_)
+rust_test_quiet=-q
+rustdoc_test_quiet=--test-args -q
+rustdoc_test_kernel_quiet=>/dev/null
+else
+cargo_quiet=--verbose
+endif
+
+core-cfgs = \
+ --cfg no_fp_fmt_parse
+
+alloc-cfgs = \
+ --cfg no_global_oom_handling \
+ --cfg no_rc \
+ --cfg no_sync
+
+quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
+ cmd_rustdoc = \
+ OBJTREE=$(abspath $(objtree)) \
+ $(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
+ $(rustc_target_flags) -L$(objtree)/$(obj) \
+ --output $(objtree)/$(obj)/doc \
+ --crate-name $(subst rustdoc-,,$@) \
+ @$(objtree)/include/generated/rustc_cfg $<
+
+# The `html_logo_url` and `html_favicon_url` forms of the `doc` attribute
+# can be used to specify a custom logo. However:
+# - The given value is used as-is, thus it cannot be relative or a local file
+# (unlike the non-custom case) since the generated docs have subfolders.
+# - It requires adding it to every crate.
+# - It requires changing `core` which comes from the sysroot.
+#
+# Using `-Zcrate-attr` would solve the last two points, but not the first.
+# The https://github.com/rust-lang/rfcs/pull/3226 RFC suggests two new
+# command-line flags to solve the issue. Meanwhile, we use the non-custom case
+# and then retouch the generated files.
+rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
+ rustdoc-alloc rustdoc-kernel
+ $(Q)cp $(srctree)/Documentation/images/logo.svg $(objtree)/$(obj)/doc
+ $(Q)cp $(srctree)/Documentation/images/COPYING-logo $(objtree)/$(obj)/doc
+ $(Q)find $(objtree)/$(obj)/doc -name '*.html' -type f -print0 | xargs -0 sed -Ei \
+ -e 's:rust-logo\.svg:logo.svg:g' \
+ -e 's:rust-logo\.png:logo.svg:g' \
+ -e 's:favicon\.svg:logo.svg:g' \
+ -e 's:<link rel="alternate icon" type="image/png" href="[./]*favicon-(16x16|32x32)\.png">::g'
+ $(Q)echo '.logo-container > img { object-fit: contain; }' \
+ >> $(objtree)/$(obj)/doc/rustdoc.css
+
+rustdoc-macros: private rustdoc_host = yes
+rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
+ --extern proc_macro
+rustdoc-macros: $(src)/macros/lib.rs FORCE
+ $(call if_changed,rustdoc)
+
+rustdoc-core: private rustc_target_flags = $(core-cfgs)
+rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
+ $(call if_changed,rustdoc)
+
+rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
+ $(call if_changed,rustdoc)
+
+# We need to allow `rustdoc::broken_intra_doc_links` because some
+# `no_global_oom_handling` functions refer to non-`no_global_oom_handling`
+# functions. Ideally `rustdoc` would have a way to distinguish broken links
+# due to things that are "configured out" vs. entirely non-existing ones.
+rustdoc-alloc: private rustc_target_flags = $(alloc-cfgs) \
+ -Arustdoc::broken_intra_doc_links
+rustdoc-alloc: $(src)/alloc/lib.rs rustdoc-core rustdoc-compiler_builtins FORCE
+ $(call if_changed,rustdoc)
+
+rustdoc-kernel: private rustc_target_flags = --extern alloc \
+ --extern build_error --extern macros=$(objtree)/$(obj)/libmacros.so \
+ --extern bindings
+rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-macros \
+ rustdoc-compiler_builtins rustdoc-alloc $(obj)/libmacros.so \
+ $(obj)/bindings.o FORCE
+ $(call if_changed,rustdoc)
+
+quiet_cmd_rustc_test_library = RUSTC TL $<
+ cmd_rustc_test_library = \
+ OBJTREE=$(abspath $(objtree)) \
+ $(RUSTC) $(rust_common_flags) \
+ @$(objtree)/include/generated/rustc_cfg $(rustc_target_flags) \
+ --crate-type $(if $(rustc_test_library_proc),proc-macro,rlib) \
+ --out-dir $(objtree)/$(obj)/test --cfg testlib \
+ --sysroot $(objtree)/$(obj)/test/sysroot \
+ -L$(objtree)/$(obj)/test \
+ --crate-name $(subst rusttest-,,$(subst rusttestlib-,,$@)) $<
+
+rusttestlib-build_error: $(src)/build_error.rs rusttest-prepare FORCE
+ $(call if_changed,rustc_test_library)
+
+rusttestlib-macros: private rustc_target_flags = --extern proc_macro
+rusttestlib-macros: private rustc_test_library_proc = yes
+rusttestlib-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
+ $(call if_changed,rustc_test_library)
+
+rusttestlib-bindings: $(src)/bindings/lib.rs rusttest-prepare FORCE
+ $(call if_changed,rustc_test_library)
+
+quiet_cmd_rustdoc_test = RUSTDOC T $<
+ cmd_rustdoc_test = \
+ OBJTREE=$(abspath $(objtree)) \
+ $(RUSTDOC) --test $(rust_common_flags) \
+ @$(objtree)/include/generated/rustc_cfg \
+ $(rustc_target_flags) $(rustdoc_test_target_flags) \
+ --sysroot $(objtree)/$(obj)/test/sysroot $(rustdoc_test_quiet) \
+ -L$(objtree)/$(obj)/test --output $(objtree)/$(obj)/doc \
+ --crate-name $(subst rusttest-,,$@) $<
+
+quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
+ cmd_rustdoc_test_kernel = \
+ rm -rf $(objtree)/$(obj)/test/doctests/kernel; \
+ mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
+ OBJTREE=$(abspath $(objtree)) \
+ $(RUSTDOC) --test $(rust_flags) \
+ @$(objtree)/include/generated/rustc_cfg \
+ -L$(objtree)/$(obj) --extern alloc --extern kernel \
+ --extern build_error --extern macros \
+ --extern bindings \
+ --no-run --crate-name kernel -Zunstable-options \
+ --test-builder $(srctree)/scripts/rustdoc_test_builder.py \
+ $< $(rustdoc_test_kernel_quiet); \
+ $(srctree)/scripts/rustdoc_test_gen.py
+
+%/doctests_kernel_generated.rs %/doctests_kernel_generated_kunit.c: $(src)/kernel/lib.rs $(obj)/kernel.o FORCE
+ $(call if_changed,rustdoc_test_kernel)
+
+# We cannot use `-Zpanic-abort-tests` because some tests are dynamic,
+# so for the moment we skip `-Cpanic=abort`.
+quiet_cmd_rustc_test = RUSTC T $<
+ cmd_rustc_test = \
+ OBJTREE=$(abspath $(objtree)) \
+ $(RUSTC) --test $(rust_common_flags) \
+ @$(objtree)/include/generated/rustc_cfg \
+ $(rustc_target_flags) --out-dir $(objtree)/$(obj)/test \
+ --sysroot $(objtree)/$(obj)/test/sysroot \
+ -L$(objtree)/$(obj)/test \
+ --crate-name $(subst rusttest-,,$@) $<; \
+ $(objtree)/$(obj)/test/$(subst rusttest-,,$@) $(rust_test_quiet) \
+ $(rustc_test_run_flags)
+
+rusttest: rusttest-macros rusttest-kernel
+
+# This prepares a custom sysroot with our custom `alloc` instead of
+# the standard one.
+#
+# This requires several hacks:
+# - Unlike `core` and `alloc`, `std` depends on more than a dozen crates,
+# including third-party crates that need to be downloaded, plus custom
+# `build.rs` steps. Thus hardcoding things here is not maintainable.
+# - `cargo` knows how to build the standard library, but it is an unstable
+# feature so far (`-Zbuild-std`).
+# - `cargo` only considers the use case of building the standard library
+# to use it in a given package. Thus we need to create a dummy package
+# and pick the generated libraries from there.
+# - Since we only keep a subset of upstream `alloc` in-tree, we need
+# to recreate it on the fly by putting our sources on top.
+# - The usual ways of modifying the dependency graph in `cargo` do not seem
+# to apply for the `-Zbuild-std` steps, thus we have to mislead it
+# by modifying the sources in the sysroot.
+# - To avoid messing with the user's Rust installation, we create a clone
+# of the sysroot. However, `cargo` ignores `RUSTFLAGS` in the `-Zbuild-std`
+# steps, thus we use a wrapper binary passed via `RUSTC` to pass the flag.
+#
+# In the future, we hope to avoid the whole ordeal by either:
+# - Making the `test` crate not depend on `std` (either improving upstream
+# or having our own custom crate).
+# - Making the tests run in kernel space (requires the previous point).
+# - Making `std` and friends be more like a "normal" crate, so that
+# `-Zbuild-std` and related hacks are not needed.
+quiet_cmd_rustsysroot = RUSTSYSROOT
+ cmd_rustsysroot = \
+ rm -rf $(objtree)/$(obj)/test; \
+ mkdir -p $(objtree)/$(obj)/test; \
+ cp -a $(rustc_sysroot) $(objtree)/$(obj)/test/sysroot; \
+ cp -r $(srctree)/$(src)/alloc/* \
+ $(objtree)/$(obj)/test/sysroot/lib/rustlib/src/rust/library/alloc/src; \
+ echo '\#!/bin/sh' > $(objtree)/$(obj)/test/rustc_sysroot; \
+ echo "$(RUSTC) --sysroot=$(abspath $(objtree)/$(obj)/test/sysroot) \"\$$@\"" \
+ >> $(objtree)/$(obj)/test/rustc_sysroot; \
+ chmod u+x $(objtree)/$(obj)/test/rustc_sysroot; \
+ $(CARGO) -q new $(objtree)/$(obj)/test/dummy; \
+ RUSTC=$(objtree)/$(obj)/test/rustc_sysroot $(CARGO) $(cargo_quiet) \
+ test -Zbuild-std --target $(rustc_host_target) \
+ --manifest-path $(objtree)/$(obj)/test/dummy/Cargo.toml; \
+ rm $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib/*; \
+ cp $(objtree)/$(obj)/test/dummy/target/$(rustc_host_target)/debug/deps/* \
+ $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib
+
+rusttest-prepare: FORCE
+ $(call if_changed,rustsysroot)
+
+rusttest-macros: private rustc_target_flags = --extern proc_macro
+rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro
+rusttest-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
+ $(call if_changed,rustc_test)
+ $(call if_changed,rustdoc_test)
+
+rusttest-kernel: private rustc_target_flags = --extern alloc \
+ --extern build_error --extern macros --extern bindings
+rusttest-kernel: $(src)/kernel/lib.rs rusttest-prepare \
+ rusttestlib-build_error rusttestlib-macros rusttestlib-bindings FORCE
+ $(call if_changed,rustc_test)
+ $(call if_changed,rustc_test_library)
+
+filechk_rust_target = $(objtree)/scripts/generate_rust_target < $<
+
+$(obj)/target.json: $(objtree)/include/config/auto.conf FORCE
+ $(call filechk,rust_target)
+
+ifdef CONFIG_CC_IS_CLANG
+bindgen_c_flags = $(c_flags)
+else
+# bindgen relies on libclang to parse C. Ideally, bindgen would support a GCC
+# plugin backend and/or the Clang driver would be perfectly compatible with GCC.
+#
+# For the moment, here we are tweaking the flags on the fly. This is a hack,
+# and some kernel configurations may not work (e.g. `GCC_PLUGIN_RANDSTRUCT`
+# if we end up using one of those structs).
+bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
+ -mskip-rax-setup -mgeneral-regs-only -msign-return-address=% \
+ -mindirect-branch=thunk-extern -mindirect-branch-register \
+ -mfunction-return=thunk-extern -mrecord-mcount -mabi=lp64 \
+ -mstack-protector-guard% -mtraceback=no \
+ -mno-pointers-to-nested-functions -mno-string \
+ -mno-strict-align -mstrict-align \
+ -fconserve-stack -falign-jumps=% -falign-loops=% \
+ -femit-struct-debug-baseonly -fno-ipa-cp-clone -fno-ipa-sra \
+ -fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \
+ -fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
+ -fzero-call-used-regs=% -fno-stack-clash-protection \
+ -fno-inline-functions-called-once \
+ --param=% --param asan-%
+
+# Derived from `scripts/Makefile.clang`.
+BINDGEN_TARGET_arm := arm-linux-gnueabi
+BINDGEN_TARGET_arm64 := aarch64-linux-gnu
+BINDGEN_TARGET_powerpc := powerpc64le-linux-gnu
+BINDGEN_TARGET_riscv := riscv64-linux-gnu
+BINDGEN_TARGET_x86 := x86_64-linux-gnu
+BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
+
+# All warnings are inhibited since GCC builds are very experimental: many GCC
+# warnings are not supported by Clang, and they may only appear in some
+# configurations, with new GCC versions, etc.
+bindgen_extra_c_flags = -w --target=$(BINDGEN_TARGET)
+
+bindgen_c_flags = $(filter-out $(bindgen_skip_c_flags), $(c_flags)) \
+ $(bindgen_extra_c_flags)
+endif
+
+ifdef CONFIG_LTO
+bindgen_c_flags_lto = $(filter-out $(CC_FLAGS_LTO), $(bindgen_c_flags))
+else
+bindgen_c_flags_lto = $(bindgen_c_flags)
+endif
+
+bindgen_c_flags_final = $(bindgen_c_flags_lto) -D__BINDGEN__
+
+quiet_cmd_bindgen = BINDGEN $@
+ cmd_bindgen = \
+ $(BINDGEN) $< $(bindgen_target_flags) \
+ --use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \
+ --no-debug '.*' \
+ --size_t-is-usize -o $@ -- $(bindgen_c_flags_final) -DMODULE \
+ $(bindgen_target_cflags) $(bindgen_target_extra)
+
+$(obj)/bindings/bindings_generated.rs: private bindgen_target_flags = \
+ $(shell grep -v '^\#\|^$$' $(srctree)/$(src)/bindgen_parameters)
+$(obj)/bindings/bindings_generated.rs: $(src)/bindings/bindings_helper.h \
+ $(src)/bindgen_parameters FORCE
+ $(call if_changed_dep,bindgen)
+
+# See `CFLAGS_REMOVE_helpers.o` above. In addition, Clang on C does not warn
+# with `-Wmissing-declarations` (unlike GCC), so it is not strictly needed here
+# given it is `libclang`; but for consistency with future Clang changes and/or
+# a potential future GCC backend for `bindgen`, we disable it too.
+$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_flags = \
+ --blacklist-type '.*' --whitelist-var '' \
+ --whitelist-function 'rust_helper_.*'
+$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \
+ -I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
+$(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
+ sed -Ei 's/pub fn rust_helper_([a-zA-Z0-9_]*)/#[link_name="rust_helper_\1"]\n pub fn \1/g' $@
+$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
+ $(call if_changed_dep,bindgen)
+
+quiet_cmd_exports = EXPORTS $@
+ cmd_exports = \
+ $(NM) -p --defined-only $< \
+ | grep -E ' (T|R|D) ' | cut -d ' ' -f 3 \
+ | xargs -Isymbol \
+ echo 'EXPORT_SYMBOL_RUST_GPL(symbol);' > $@
+
+$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
+ $(call if_changed,exports)
+
+$(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE
+ $(call if_changed,exports)
+
+$(obj)/exports_bindings_generated.h: $(obj)/bindings.o FORCE
+ $(call if_changed,exports)
+
+$(obj)/exports_kernel_generated.h: $(obj)/kernel.o FORCE
+ $(call if_changed,exports)
+
+quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
+ cmd_rustc_procmacro = \
+ $(RUSTC_OR_CLIPPY) $(rust_common_flags) \
+ --emit=dep-info,link --extern proc_macro \
+ --crate-type proc-macro --out-dir $(objtree)/$(obj) \
+ --crate-name $(patsubst lib%.so,%,$(notdir $@)) $<; \
+ mv $(objtree)/$(obj)/$(patsubst lib%.so,%,$(notdir $@)).d $(depfile); \
+ sed -i '/^\#/d' $(depfile)
+
+# Procedural macros can only be used with the `rustc` that compiled them.
+# Therefore, to get `libmacros.so` automatically recompiled when the compiler
+# version changes, we add `core.o` as a dependency (even if it is not needed).
+$(obj)/libmacros.so: $(src)/macros/lib.rs $(obj)/core.o FORCE
+ $(call if_changed_dep,rustc_procmacro)
+
+quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@
+ cmd_rustc_library = \
+ OBJTREE=$(abspath $(objtree)) \
+ $(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \
+ $(filter-out $(skip_flags),$(rust_flags) $(rustc_target_flags)) \
+ --emit=dep-info,obj,metadata --crate-type rlib \
+ --out-dir $(objtree)/$(obj) -L$(objtree)/$(obj) \
+ --crate-name $(patsubst %.o,%,$(notdir $@)) $<; \
+ mv $(objtree)/$(obj)/$(patsubst %.o,%,$(notdir $@)).d $(depfile); \
+ sed -i '/^\#/d' $(depfile) \
+ $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
+
+rust-analyzer:
+ $(Q)$(srctree)/scripts/generate_rust_analyzer.py $(srctree) $(objtree) \
+ $(RUST_LIB_SRC) > $(objtree)/rust-project.json
+
+$(obj)/core.o: private skip_clippy = 1
+$(obj)/core.o: private skip_flags = -Dunreachable_pub
+$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
+$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs $(obj)/target.json FORCE
+ $(call if_changed_dep,rustc_library)
+
+$(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
+$(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
+ $(call if_changed_dep,rustc_library)
+
+$(obj)/alloc.o: private skip_clippy = 1
+$(obj)/alloc.o: private skip_flags = -Dunreachable_pub
+$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
+$(obj)/alloc.o: $(src)/alloc/lib.rs $(obj)/compiler_builtins.o FORCE
+ $(call if_changed_dep,rustc_library)
+
+$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
+ $(call if_changed_dep,rustc_library)
+
+$(obj)/bindings.o: $(src)/bindings/lib.rs \
+ $(obj)/compiler_builtins.o \
+ $(obj)/bindings/bindings_generated.rs \
+ $(obj)/bindings/bindings_helpers_generated.rs FORCE
+ $(call if_changed_dep,rustc_library)
+
+$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
+ --extern build_error --extern macros --extern bindings
+$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
+ $(obj)/libmacros.so $(obj)/bindings.o FORCE
+ $(call if_changed_dep,rustc_library)
+
+endif # CONFIG_RUST
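
One detail of the Makefile above worth spelling out is the `sed` in the
`bindings_helpers_generated.rs` rule: bindgen emits declarations named after the C
helpers in rust/helpers.c, and the sed re-exposes them under their unprefixed names
via `#[link_name]`. Roughly, with a hypothetical helper used purely for illustration:

    // bindgen output before the sed step:
    extern "C" {
        pub fn rust_helper_example_lock() -> i32;
    }

    // ...and after it: Rust callers use the plain name, while the symbol still
    // resolves to the C helper compiled from rust/helpers.c.
    extern "C" {
        #[link_name="rust_helper_example_lock"]
        pub fn example_lock() -> i32;
    }
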
diff --git a/rust/alloc/README.md b/rust/alloc/README.md
new file mode 100644
index 000000000000..c89c753720b5
--- /dev/null
+++ b/rust/alloc/README.md
@@ -0,0 +1,33 @@
+# `alloc`
+
+These source files come from the Rust standard library, hosted in
+the <https://github.com/rust-lang/rust> repository, licensed under
+"Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
+see <https://github.com/rust-lang/rust/blob/master/COPYRIGHT>.
+
+Please note that these files should be kept as close as possible to
+upstream. In general, only additions should be performed (e.g. new
+methods). Eventually, changes should make it into upstream so that,
+at some point, this fork can be dropped from the kernel tree.
+
+
+## Rationale
+
+On one hand, kernel folks wanted to keep `alloc` in-tree to have more
+freedom in both workflow and actual features if actually needed
+(e.g. receiver types if we ended up using them), which is reasonable.
+
+On the other hand, Rust folks wanted to keep `alloc` as close as
+upstream as possible and avoid as much divergence as possible, which
+is also reasonable.
+
+We agreed on a middle-ground: we would keep a subset of `alloc`
+in-tree that would be as small and as close as possible to upstream.
+Then, upstream can start adding the functions that we add to `alloc`
+etc., until we reach a point where the kernel already knows exactly
+what it needs in `alloc` and all the new methods are merged into
+upstream, so that we can drop `alloc` from the kernel tree and go back
+to using the upstream one.
+
+By doing this, the kernel can go a bit faster now, and Rust can
+slowly incorporate and discuss the changes as needed.
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
new file mode 100644
index 000000000000..ca224a541770
--- /dev/null
+++ b/rust/alloc/alloc.rs
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Memory allocation APIs
+
+#![stable(feature = "alloc_module", since = "1.28.0")]
+
+#[cfg(not(test))]
+use core::intrinsics;
+use core::intrinsics::{min_align_of_val, size_of_val};
+
+use core::ptr::Unique;
+#[cfg(not(test))]
+use core::ptr::{self, NonNull};
+
+#[stable(feature = "alloc_module", since = "1.28.0")]
+#[doc(inline)]
+pub use core::alloc::*;
+
+use core::marker::Destruct;
+
+#[cfg(test)]
+mod tests;
+
+extern "Rust" {
+ // These are the magic symbols to call the global allocator. rustc generates
+ // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
+ // (the code expanding that attribute macro generates those functions), or to call
+ // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
+ // otherwise.
+ // The rustc fork of LLVM also special-cases these function names to be able to optimize them
+ // like `malloc`, `realloc`, and `free`, respectively.
+ #[rustc_allocator]
+ #[rustc_allocator_nounwind]
+ fn __rust_alloc(size: usize, align: usize) -> *mut u8;
+ #[rustc_allocator_nounwind]
+ fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+ #[rustc_allocator_nounwind]
+ fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
+ #[rustc_allocator_nounwind]
+ fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+}
+
+/// The global memory allocator.
+///
+/// This type implements the [`Allocator`] trait by forwarding calls
+/// to the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// Note: while this type is unstable, the functionality it provides can be
+/// accessed through the [free functions in `alloc`](self#functions).
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Copy, Clone, Default, Debug)]
+#[cfg(not(test))]
+pub struct Global;
+
+#[cfg(test)]
+pub use std::alloc::Global;
+
+/// Allocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::alloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `alloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::alloc`].
+///
+/// # Examples
+///
+/// ```
+/// use std::alloc::{alloc, dealloc, Layout};
+///
+/// unsafe {
+/// let layout = Layout::new::<u16>();
+/// let ptr = alloc(layout);
+///
+/// *(ptr as *mut u16) = 42;
+/// assert_eq!(*(ptr as *mut u16), 42);
+///
+/// dealloc(ptr, layout);
+/// }
+/// ```
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[must_use = "losing the pointer will leak memory"]
+#[inline]
+pub unsafe fn alloc(layout: Layout) -> *mut u8 {
+ unsafe { __rust_alloc(layout.size(), layout.align()) }
+}
+
+/// Deallocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `dealloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::dealloc`].
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
+ unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
+}
+
+/// Reallocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::realloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `realloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::realloc`].
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[must_use = "losing the pointer will leak memory"]
+#[inline]
+pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
+}
+
+/// Allocate zero-initialized memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::alloc_zeroed`].
+///
+/// # Examples
+///
+/// ```
+/// use std::alloc::{alloc_zeroed, dealloc, Layout};
+///
+/// unsafe {
+/// let layout = Layout::new::<u16>();
+/// let ptr = alloc_zeroed(layout);
+///
+/// assert_eq!(*(ptr as *mut u16), 0);
+///
+/// dealloc(ptr, layout);
+/// }
+/// ```
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[must_use = "losing the pointer will leak memory"]
+#[inline]
+pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
+ unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
+}
+
+#[cfg(not(test))]
+impl Global {
+ #[inline]
+ fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+ match layout.size() {
+ 0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+ // SAFETY: `layout` is non-zero in size,
+ size => unsafe {
+ let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::slice_from_raw_parts(ptr, size))
+ },
+ }
+ }
+
+ // SAFETY: Same as `Allocator::grow`
+ #[inline]
+ unsafe fn grow_impl(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ zeroed: bool,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ match old_layout.size() {
+ 0 => self.alloc_impl(new_layout, zeroed),
+
+ // SAFETY: `new_size` is non-zero as it is greater than or equal to `old_size`,
+ // which is non-zero here, as required by the safety conditions. Other conditions
+ // must be upheld by the caller.
+ old_size if old_layout.align() == new_layout.align() => unsafe {
+ let new_size = new_layout.size();
+
+ // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
+ intrinsics::assume(new_size >= old_layout.size());
+
+ let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ if zeroed {
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
+ Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+ },
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
+ // both the old and new memory allocation are valid for reads and writes for `old_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ old_size => unsafe {
+ let new_ptr = self.alloc_impl(new_layout, zeroed)?;
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(not(test))]
+unsafe impl Allocator for Global {
+ #[inline]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, false)
+ }
+
+ #[inline]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, true)
+ }
+
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ if layout.size() != 0 {
+ // SAFETY: `layout` is non-zero in size,
+ // other conditions must be upheld by the caller
+ unsafe { dealloc(ptr.as_ptr(), layout) }
+ }
+ }
+
+ #[inline]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
+ }
+
+ #[inline]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
+ }
+
+ #[inline]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ match new_layout.size() {
+ // SAFETY: conditions must be upheld by the caller
+ 0 => unsafe {
+ self.deallocate(ptr, old_layout);
+ Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
+ },
+
+ // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
+ new_size if old_layout.align() == new_layout.align() => unsafe {
+ // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
+ intrinsics::assume(new_size <= old_layout.size());
+
+ let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+ },
+
+ // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
+ // both the old and new memory allocation are valid for reads and writes for `new_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ new_size => unsafe {
+ let new_ptr = self.allocate(new_layout)?;
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+/// The allocator for unique pointers.
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[lang = "exchange_malloc"]
+#[inline]
+unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
+ let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+ match Global.allocate(layout) {
+ Ok(ptr) => ptr.as_mut_ptr(),
+ Err(_) => handle_alloc_error(layout),
+ }
+}
+
+#[cfg_attr(not(test), lang = "box_free")]
+#[inline]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+// This signature has to be the same as `Box`, otherwise an ICE will happen.
+// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
+// well.
+// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
+// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
+pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Destruct>(
+ ptr: Unique<T>,
+ alloc: A,
+) {
+ unsafe {
+ let size = size_of_val(ptr.as_ref());
+ let align = min_align_of_val(ptr.as_ref());
+ let layout = Layout::from_size_align_unchecked(size, align);
+ alloc.deallocate(From::from(ptr.cast()), layout)
+ }
+}
+
+// # Allocation error handler
+
+#[cfg(not(no_global_oom_handling))]
+extern "Rust" {
+ // This is the magic symbol to call the global alloc error handler. rustc generates
+ // it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
+ // default implementations below (`__rdl_oom`) otherwise.
+ fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
+}
+
+/// Abort on memory allocation error or failure.
+///
+/// Callers of memory allocation APIs wishing to abort computation
+/// in response to an allocation error are encouraged to call this function,
+/// rather than directly invoking `panic!` or similar.
+///
+/// The default behavior of this function is to print a message to standard error
+/// and abort the process.
+/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
+///
+/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
+/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[cold]
+pub const fn handle_alloc_error(layout: Layout) -> ! {
+ const fn ct_error(_: Layout) -> ! {
+ panic!("allocation failed");
+ }
+
+ fn rt_error(layout: Layout) -> ! {
+ unsafe {
+ __rust_alloc_error_handler(layout.size(), layout.align());
+ }
+ }
+
+ unsafe { core::intrinsics::const_eval_select((layout,), ct_error, rt_error) }
+}
+
+// For alloc test `std::alloc::handle_alloc_error` can be used directly.
+#[cfg(all(not(no_global_oom_handling), test))]
+pub use std::alloc::handle_alloc_error;
+
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[doc(hidden)]
+#[allow(unused_attributes)]
+#[unstable(feature = "alloc_internals", issue = "none")]
+pub mod __alloc_error_handler {
+ use crate::alloc::Layout;
+
+ // called via generated `__rust_alloc_error_handler`
+
+ // if there is no `#[alloc_error_handler]`
+ #[rustc_std_internal_symbol]
+ pub unsafe extern "C-unwind" fn __rdl_oom(size: usize, _align: usize) -> ! {
+ panic!("memory allocation of {size} bytes failed")
+ }
+
+ // if there is an `#[alloc_error_handler]`
+ #[rustc_std_internal_symbol]
+ pub unsafe extern "C-unwind" fn __rg_oom(size: usize, align: usize) -> ! {
+ let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+ extern "Rust" {
+ #[lang = "oom"]
+ fn oom_impl(layout: Layout) -> !;
+ }
+ unsafe { oom_impl(layout) }
+ }
+}
+
+/// Specialize clones into pre-allocated, uninitialized memory.
+/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
+pub(crate) trait WriteCloneIntoRaw: Sized {
+ unsafe fn write_clone_into_raw(&self, target: *mut Self);
+}
+
+impl<T: Clone> WriteCloneIntoRaw for T {
+ #[inline]
+ default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
+ // Having allocated *first* may allow the optimizer to create
+ // the cloned value in-place, skipping the local and move.
+ unsafe { target.write(self.clone()) };
+ }
+}
+
+impl<T: Copy> WriteCloneIntoRaw for T {
+ #[inline]
+ unsafe fn write_clone_into_raw(&self, target: *mut Self) {
+ // We can always copy in-place, without ever involving a local value.
+ unsafe { target.copy_from_nonoverlapping(self, 1) };
+ }
+}
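
The `extern "Rust"` block near the top of this file is the consumer side of the
`#[global_allocator]` machinery: rustc routes the `__rust_alloc`-family symbols to
whatever allocator the final binary registers (the kernel's own one is added in
rust/kernel/allocator.rs and forwards to the kernel heap rather than libc). A
self-contained userspace sketch of registering such an allocator:

    use std::alloc::{GlobalAlloc, Layout, System};

    struct CountingAlloc;

    unsafe impl GlobalAlloc for CountingAlloc {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            // Delegate to the system allocator; a kernel allocator would call
            // into the kernel's own heap here instead.
            System.alloc(layout)
        }
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            System.dealloc(ptr, layout)
        }
    }

    #[global_allocator]
    static GLOBAL: CountingAlloc = CountingAlloc;

    fn main() {
        let v = vec![1u32, 2, 3]; // Served by the allocator registered above.
        assert_eq!(v.iter().sum::<u32>(), 6);
    }
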
diff --git a/rust/alloc/borrow.rs b/rust/alloc/borrow.rs
new file mode 100644
index 000000000000..ca8e3dfa7004
--- /dev/null
+++ b/rust/alloc/borrow.rs
@@ -0,0 +1,498 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A module for working with borrowed data.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::cmp::Ordering;
+use core::hash::{Hash, Hasher};
+use core::ops::Deref;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::{Add, AddAssign};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::borrow::{Borrow, BorrowMut};
+
+use crate::fmt;
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+
+use Cow::*;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
+where
+ B: ToOwned,
+ <B as ToOwned>::Owned: 'a,
+{
+ fn borrow(&self) -> &B {
+ &**self
+ }
+}
+
+/// A generalization of `Clone` to borrowed data.
+///
+/// Some types make it possible to go from borrowed to owned, usually by
+/// implementing the `Clone` trait. But `Clone` works only for going from `&T`
+/// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data
+/// from any borrow of a given type.
+#[cfg_attr(not(test), rustc_diagnostic_item = "ToOwned")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ToOwned {
+ /// The resulting type after obtaining ownership.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Owned: Borrow<Self>;
+
+ /// Creates owned data from borrowed data, usually by cloning.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "a";
+ /// let ss: String = s.to_owned();
+ ///
+ /// let v: &[i32] = &[1, 2];
+ /// let vv: Vec<i32> = v.to_owned();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "cloning is often expensive and is not expected to have side effects"]
+ fn to_owned(&self) -> Self::Owned;
+
+ /// Uses borrowed data to replace owned data, usually by cloning.
+ ///
+ /// This is a borrow-generalized version of `Clone::clone_from`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(toowned_clone_into)]
+ /// let mut s: String = String::new();
+ /// "hello".clone_into(&mut s);
+ ///
+ /// let mut v: Vec<i32> = Vec::new();
+ /// [1, 2][..].clone_into(&mut v);
+ /// ```
+ #[unstable(feature = "toowned_clone_into", reason = "recently added", issue = "41263")]
+ fn clone_into(&self, target: &mut Self::Owned) {
+ *target = self.to_owned();
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ToOwned for T
+where
+ T: Clone,
+{
+ type Owned = T;
+ fn to_owned(&self) -> T {
+ self.clone()
+ }
+
+ fn clone_into(&self, target: &mut T) {
+ target.clone_from(self);
+ }
+}
+
+/// A clone-on-write smart pointer.
+///
+/// The type `Cow` is a smart pointer providing clone-on-write functionality: it
+/// can enclose and provide immutable access to borrowed data, and clone the
+/// data lazily when mutation or ownership is required. The type is designed to
+/// work with general borrowed data via the `Borrow` trait.
+///
+/// `Cow` implements `Deref`, which means that you can call
+/// non-mutating methods directly on the data it encloses. If mutation
+/// is desired, `to_mut` will obtain a mutable reference to an owned
+/// value, cloning if necessary.
+///
+/// If you need reference-counting pointers, note that
+/// [`Rc::make_mut`][crate::rc::Rc::make_mut] and
+/// [`Arc::make_mut`][crate::sync::Arc::make_mut] can provide clone-on-write
+/// functionality as well.
+///
+/// # Examples
+///
+/// ```
+/// use std::borrow::Cow;
+///
+/// fn abs_all(input: &mut Cow<[i32]>) {
+/// for i in 0..input.len() {
+/// let v = input[i];
+/// if v < 0 {
+/// // Clones into a vector if not already owned.
+/// input.to_mut()[i] = -v;
+/// }
+/// }
+/// }
+///
+/// // No clone occurs because `input` doesn't need to be mutated.
+/// let slice = [0, 1, 2];
+/// let mut input = Cow::from(&slice[..]);
+/// abs_all(&mut input);
+///
+/// // Clone occurs because `input` needs to be mutated.
+/// let slice = [-1, 0, 1];
+/// let mut input = Cow::from(&slice[..]);
+/// abs_all(&mut input);
+///
+/// // No clone occurs because `input` is already owned.
+/// let mut input = Cow::from(vec![-1, 0, 1]);
+/// abs_all(&mut input);
+/// ```
+///
+/// Another example showing how to keep `Cow` in a struct:
+///
+/// ```
+/// use std::borrow::Cow;
+///
+/// struct Items<'a, X: 'a> where [X]: ToOwned<Owned = Vec<X>> {
+/// values: Cow<'a, [X]>,
+/// }
+///
+/// impl<'a, X: Clone + 'a> Items<'a, X> where [X]: ToOwned<Owned = Vec<X>> {
+/// fn new(v: Cow<'a, [X]>) -> Self {
+/// Items { values: v }
+/// }
+/// }
+///
+/// // Creates a container from borrowed values of a slice
+/// let readonly = [1, 2];
+/// let borrowed = Items::new((&readonly[..]).into());
+/// match borrowed {
+/// Items { values: Cow::Borrowed(b) } => println!("borrowed {b:?}"),
+/// _ => panic!("expect borrowed value"),
+/// }
+///
+/// let mut clone_on_write = borrowed;
+/// // Mutates the data from slice into owned vec and pushes a new value on top
+/// clone_on_write.values.to_mut().push(3);
+/// println!("clone_on_write = {:?}", clone_on_write.values);
+///
+/// // The data was mutated. Let's check it out.
+/// match clone_on_write {
+/// Items { values: Cow::Owned(_) } => println!("clone_on_write contains owned data"),
+/// _ => panic!("expect owned data"),
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Cow")]
+pub enum Cow<'a, B: ?Sized + 'a>
+where
+ B: ToOwned,
+{
+ /// Borrowed data.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Borrowed(#[stable(feature = "rust1", since = "1.0.0")] &'a B),
+
+ /// Owned data.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Owned(#[stable(feature = "rust1", since = "1.0.0")] <B as ToOwned>::Owned),
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized + ToOwned> Clone for Cow<'_, B> {
+ fn clone(&self) -> Self {
+ match *self {
+ Borrowed(b) => Borrowed(b),
+ Owned(ref o) => {
+ let b: &B = o.borrow();
+ Owned(b.to_owned())
+ }
+ }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (&mut Owned(ref mut dest), &Owned(ref o)) => o.borrow().clone_into(dest),
+ (t, s) => *t = s.clone(),
+ }
+ }
+}
+
+impl<B: ?Sized + ToOwned> Cow<'_, B> {
+ /// Returns true if the data is borrowed, i.e. if `to_mut` would require additional work.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cow_is_borrowed)]
+ /// use std::borrow::Cow;
+ ///
+ /// let cow = Cow::Borrowed("moo");
+ /// assert!(cow.is_borrowed());
+ ///
+ /// let bull: Cow<'_, str> = Cow::Owned("...moo?".to_string());
+ /// assert!(!bull.is_borrowed());
+ /// ```
+ #[unstable(feature = "cow_is_borrowed", issue = "65143")]
+ #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
+ pub const fn is_borrowed(&self) -> bool {
+ match *self {
+ Borrowed(_) => true,
+ Owned(_) => false,
+ }
+ }
+
+ /// Returns true if the data is owned, i.e. if `to_mut` would be a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cow_is_borrowed)]
+ /// use std::borrow::Cow;
+ ///
+ /// let cow: Cow<'_, str> = Cow::Owned("moo".to_string());
+ /// assert!(cow.is_owned());
+ ///
+ /// let bull = Cow::Borrowed("...moo?");
+ /// assert!(!bull.is_owned());
+ /// ```
+ #[unstable(feature = "cow_is_borrowed", issue = "65143")]
+ #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
+ pub const fn is_owned(&self) -> bool {
+ !self.is_borrowed()
+ }
+
+ /// Acquires a mutable reference to the owned form of the data.
+ ///
+ /// Clones the data if it is not already owned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let mut cow = Cow::Borrowed("foo");
+ /// cow.to_mut().make_ascii_uppercase();
+ ///
+ /// assert_eq!(
+ /// cow,
+ /// Cow::Owned(String::from("FOO")) as Cow<str>
+ /// );
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned {
+ match *self {
+ Borrowed(borrowed) => {
+ *self = Owned(borrowed.to_owned());
+ match *self {
+ Borrowed(..) => unreachable!(),
+ Owned(ref mut owned) => owned,
+ }
+ }
+ Owned(ref mut owned) => owned,
+ }
+ }
+
+ /// Extracts the owned data.
+ ///
+ /// Clones the data if it is not already owned.
+ ///
+ /// # Examples
+ ///
+ /// Calling `into_owned` on a `Cow::Borrowed` returns a clone of the borrowed data:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let s = "Hello world!";
+ /// let cow = Cow::Borrowed(s);
+ ///
+ /// assert_eq!(
+ /// cow.into_owned(),
+ /// String::from(s)
+ /// );
+ /// ```
+ ///
+ /// Calling `into_owned` on a `Cow::Owned` returns the owned data. The data is moved out of the
+ /// `Cow` without being cloned.
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let s = "Hello world!";
+ /// let cow: Cow<str> = Cow::Owned(String::from(s));
+ ///
+ /// assert_eq!(
+ /// cow.into_owned(),
+ /// String::from(s)
+ /// );
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_owned(self) -> <B as ToOwned>::Owned {
+ match self {
+ Borrowed(borrowed) => borrowed.to_owned(),
+ Owned(owned) => owned,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
+impl<B: ?Sized + ToOwned> const Deref for Cow<'_, B>
+where
+ B::Owned: ~const Borrow<B>,
+{
+ type Target = B;
+
+ fn deref(&self) -> &B {
+ match *self {
+ Borrowed(borrowed) => borrowed,
+ Owned(ref owned) => owned.borrow(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Eq for Cow<'_, B> where B: Eq + ToOwned {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Ord for Cow<'_, B>
+where
+ B: Ord + ToOwned,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, 'b, B: ?Sized, C: ?Sized> PartialEq<Cow<'b, C>> for Cow<'a, B>
+where
+ B: PartialEq<C> + ToOwned,
+ C: ToOwned,
+{
+ #[inline]
+ fn eq(&self, other: &Cow<'b, C>) -> bool {
+ PartialEq::eq(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, B: ?Sized> PartialOrd for Cow<'a, B>
+where
+ B: PartialOrd + ToOwned,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> fmt::Debug for Cow<'_, B>
+where
+ B: fmt::Debug + ToOwned<Owned: fmt::Debug>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Borrowed(ref b) => fmt::Debug::fmt(b, f),
+ Owned(ref o) => fmt::Debug::fmt(o, f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> fmt::Display for Cow<'_, B>
+where
+ B: fmt::Display + ToOwned<Owned: fmt::Display>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Borrowed(ref b) => fmt::Display::fmt(b, f),
+ Owned(ref o) => fmt::Display::fmt(o, f),
+ }
+ }
+}
+
+#[stable(feature = "default", since = "1.11.0")]
+impl<B: ?Sized> Default for Cow<'_, B>
+where
+ B: ToOwned<Owned: Default>,
+{
+ /// Creates an owned Cow<'a, B> with the default value for the contained owned value.
+ fn default() -> Self {
+ Owned(<B as ToOwned>::Owned::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Hash for Cow<'_, B>
+where
+ B: Hash + ToOwned,
+{
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ Hash::hash(&**self, state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + ToOwned> AsRef<T> for Cow<'_, T> {
+ fn as_ref(&self) -> &T {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> Add<&'a str> for Cow<'a, str> {
+ type Output = Cow<'a, str>;
+
+ #[inline]
+ fn add(mut self, rhs: &'a str) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> Add<Cow<'a, str>> for Cow<'a, str> {
+ type Output = Cow<'a, str>;
+
+ #[inline]
+ fn add(mut self, rhs: Cow<'a, str>) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> AddAssign<&'a str> for Cow<'a, str> {
+ fn add_assign(&mut self, rhs: &'a str) {
+ if self.is_empty() {
+ *self = Cow::Borrowed(rhs)
+ } else if !rhs.is_empty() {
+ if let Cow::Borrowed(lhs) = *self {
+ let mut s = String::with_capacity(lhs.len() + rhs.len());
+ s.push_str(lhs);
+ *self = Cow::Owned(s);
+ }
+ self.to_mut().push_str(rhs);
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> AddAssign<Cow<'a, str>> for Cow<'a, str> {
+ fn add_assign(&mut self, rhs: Cow<'a, str>) {
+ if self.is_empty() {
+ *self = rhs
+ } else if !rhs.is_empty() {
+ if let Cow::Borrowed(lhs) = *self {
+ let mut s = String::with_capacity(lhs.len() + rhs.len());
+ s.push_str(lhs);
+ *self = Cow::Owned(s);
+ }
+ self.to_mut().push_str(&rhs);
+ }
+ }
+}
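
A short usage note on the `Add`/`AddAssign` impls that close this file: appending to
a borrowed `Cow<str>` allocates a `String` with capacity for both halves and switches
the value to the `Owned` variant. For example:

    use std::borrow::Cow;

    fn main() {
        let mut greeting: Cow<'_, str> = Cow::Borrowed("Hello");
        greeting += ", world"; // Borrowed -> Owned, sized for both parts.
        assert_eq!(greeting, "Hello, world");
        assert!(matches!(greeting, Cow::Owned(_)));
    }
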
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
new file mode 100644
index 000000000000..8fd296421dec
--- /dev/null
+++ b/rust/alloc/boxed.rs
@@ -0,0 +1,2026 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A pointer type for heap allocation.
+//!
+//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
+//! heap allocation in Rust. Boxes provide ownership for this allocation, and
+//! drop their contents when they go out of scope. Boxes also ensure that they
+//! never allocate more than `isize::MAX` bytes.
+//!
+//! # Examples
+//!
+//! Move a value from the stack to the heap by creating a [`Box`]:
+//!
+//! ```
+//! let val: u8 = 5;
+//! let boxed: Box<u8> = Box::new(val);
+//! ```
+//!
+//! Move a value from a [`Box`] back to the stack by [dereferencing]:
+//!
+//! ```
+//! let boxed: Box<u8> = Box::new(5);
+//! let val: u8 = *boxed;
+//! ```
+//!
+//! Creating a recursive data structure:
+//!
+//! ```
+//! #[derive(Debug)]
+//! enum List<T> {
+//! Cons(T, Box<List<T>>),
+//! Nil,
+//! }
+//!
+//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
+//! println!("{list:?}");
+//! ```
+//!
+//! This will print `Cons(1, Cons(2, Nil))`.
+//!
+//! Recursive structures must be boxed, because if the definition of `Cons`
+//! looked like this:
+//!
+//! ```compile_fail,E0072
+//! # enum List<T> {
+//! Cons(T, List<T>),
+//! # }
+//! ```
+//!
+//! It wouldn't work. This is because the size of a `List` depends on how many
+//! elements are in the list, and so we don't know how much memory to allocate
+//! for a `Cons`. By introducing a [`Box<T>`], which has a defined size, we know how
+//! big `Cons` needs to be.
+//!
+//! # Memory layout
+//!
+//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
+//! its allocation. It is valid to convert both ways between a [`Box`] and a
+//! raw pointer allocated with the [`Global`] allocator, given that the
+//! [`Layout`] used with the allocator is correct for the type. More precisely,
+//! a `value: *mut T` that has been allocated with the [`Global`] allocator
+//! with `Layout::for_value(&*value)` may be converted into a box using
+//! [`Box::<T>::from_raw(value)`]. Conversely, the memory backing a `value: *mut
+//! T` obtained from [`Box::<T>::into_raw`] may be deallocated using the
+//! [`Global`] allocator with [`Layout::for_value(&*value)`].
+//!
+//! For zero-sized values, the `Box` pointer still has to be [valid] for reads
+//! and writes and sufficiently aligned. In particular, casting any aligned
+//! non-zero integer literal to a raw pointer produces a valid pointer, but a
+//! pointer pointing into previously allocated memory that since got freed is
+//! not valid. If `Box::new` cannot be used, the recommended way to build a
+//! `Box` of a ZST is to use [`ptr::NonNull::dangling`].
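+//!
+//! For instance, a minimal sketch of the ZST case:
+//!
+//! ```
+//! use std::ptr::NonNull;
+//!
+//! // A zero-sized type needs no allocation; a dangling, well-aligned
+//! // pointer is enough to build (and later drop) a `Box<()>`.
+//! let ptr: NonNull<()> = NonNull::dangling();
+//! let zst: Box<()> = unsafe { Box::from_raw(ptr.as_ptr()) };
+//! ```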
+//!
+//! So long as `T: Sized`, a `Box<T>` is guaranteed to be represented
+//! as a single pointer and is also ABI-compatible with C pointers
+//! (i.e. the C type `T*`). This means that if you have extern "C"
+//! Rust functions that will be called from C, you can define those
+//! Rust functions using `Box<T>` types, and use `T*` as corresponding
+//! type on the C side. As an example, consider this C header which
+//! declares functions that create and destroy some kind of `Foo`
+//! value:
+//!
+//! ```c
+//! /* C header */
+//!
+//! /* Returns ownership to the caller */
+//! struct Foo* foo_new(void);
+//!
+//! /* Takes ownership from the caller; no-op when invoked with null */
+//! void foo_delete(struct Foo*);
+//! ```
+//!
+//! These two functions might be implemented in Rust as follows. Here, the
+//! `struct Foo*` type from C is translated to `Box<Foo>`, which captures
+//! the ownership constraints. Note also that the nullable argument to
+//! `foo_delete` is represented in Rust as `Option<Box<Foo>>`, since `Box<Foo>`
+//! cannot be null.
+//!
+//! ```
+//! #[repr(C)]
+//! pub struct Foo;
+//!
+//! #[no_mangle]
+//! pub extern "C" fn foo_new() -> Box<Foo> {
+//! Box::new(Foo)
+//! }
+//!
+//! #[no_mangle]
+//! pub extern "C" fn foo_delete(_: Option<Box<Foo>>) {}
+//! ```
+//!
+//! Even though `Box<T>` has the same representation and C ABI as a C pointer,
+//! this does not mean that you can convert an arbitrary `T*` into a `Box<T>`
+//! and expect things to work. `Box<T>` values will always be fully aligned,
+//! non-null pointers. Moreover, the destructor for `Box<T>` will attempt to
+//! free the value with the global allocator. In general, the best practice
+//! is to only use `Box<T>` for pointers that originated from the global
+//! allocator.
+//!
+//! **Important.** At least at present, you should avoid using
+//! `Box<T>` types for functions that are defined in C but invoked
+//! from Rust. In those cases, you should directly mirror the C types
+//! as closely as possible. Using types like `Box<T>` where the C
+//! definition is just using `T*` can lead to undefined behavior, as
+//! described in [rust-lang/unsafe-code-guidelines#198][ucg#198].
+//!
+//! [ucg#198]: https://github.com/rust-lang/unsafe-code-guidelines/issues/198
+//! [dereferencing]: core::ops::Deref
+//! [`Box::<T>::from_raw(value)`]: Box::from_raw
+//! [`Global`]: crate::alloc::Global
+//! [`Layout`]: crate::alloc::Layout
+//! [`Layout::for_value(&*value)`]: crate::alloc::Layout::for_value
+//! [valid]: ptr#safety
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::any::Any;
+use core::async_iter::AsyncIterator;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+use core::fmt;
+use core::future::Future;
+use core::hash::{Hash, Hasher};
+#[cfg(not(no_global_oom_handling))]
+use core::iter::FromIterator;
+use core::iter::{FusedIterator, Iterator};
+use core::marker::{Destruct, Unpin, Unsize};
+use core::mem;
+use core::ops::{
+ CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
+};
+use core::pin::Pin;
+use core::ptr::{self, Unique};
+use core::task::{Context, Poll};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw};
+use crate::alloc::{AllocError, Allocator, Global, Layout};
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::Cow;
+use crate::raw_vec::RawVec;
+#[cfg(not(no_global_oom_handling))]
+use crate::str::from_boxed_utf8_unchecked;
+#[cfg(not(no_global_oom_handling))]
+use crate::vec::Vec;
+
+#[unstable(feature = "thin_box", issue = "92791")]
+pub use thin::ThinBox;
+
+mod thin;
+
+/// A pointer type for heap allocation.
+///
+/// See the [module-level documentation](../../std/boxed/index.html) for more.
+#[lang = "owned_box"]
+#[fundamental]
+#[stable(feature = "rust1", since = "1.0.0")]
+// The declaration of the `Box` struct must be kept in sync with the
+// `alloc::alloc::box_free` function or ICEs will happen. See the comment
+// on `box_free` for more details.
+pub struct Box<
+ T: ?Sized,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+>(Unique<T>, A);
+
+impl<T> Box<T> {
+ /// Allocates memory on the heap and then places `x` into it.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let five = Box::new(5);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new(x: T) -> Self {
+ box x
+ }
+
+ /// Constructs a new box with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::new_uninit();
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ #[inline]
+ pub fn new_uninit() -> Box<mem::MaybeUninit<T>> {
+ Self::new_uninit_in(Global)
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let zero = Box::<u32>::new_zeroed();
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> {
+ Self::new_zeroed_in(Global)
+ }
+
+ /// Constructs a new `Pin<Box<T>>`. If `T` does not implement `Unpin`, then
+ /// `x` will be pinned in memory and unable to be moved.
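+ ///
+ /// # Examples
+ ///
+ /// A minimal illustrative example:
+ ///
+ /// ```
+ /// use std::pin::Pin;
+ ///
+ /// let pinned: Pin<Box<u32>> = Box::pin(5);
+ /// assert_eq!(*pinned, 5);
+ /// ```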
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[must_use]
+ #[inline(always)]
+ pub fn pin(x: T) -> Pin<Box<T>> {
+ (box x).into()
+ }
+
+ /// Allocates memory on the heap then places `x` into it,
+ /// returning an error if the allocation fails
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// let five = Box::try_new(5)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new(x: T) -> Result<Self, AllocError> {
+ Self::try_new_in(x, Global)
+ }
+
+ /// Constructs a new box with uninitialized contents on the heap,
+ /// returning an error if the allocation fails
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::try_new_uninit()?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_uninit() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
+ Box::try_new_uninit_in(Global)
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes on the heap
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let zero = Box::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_zeroed() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
+ Box::try_new_zeroed_in(Global)
+ }
+}
+
+impl<T, A: Allocator> Box<T, A> {
+ /// Allocates memory in the given allocator then places `x` into it.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let five = Box::new_in(5, System);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[must_use]
+ #[inline]
+ pub const fn new_in(x: T, alloc: A) -> Self
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let mut boxed = Self::new_uninit_in(alloc);
+ unsafe {
+ boxed.as_mut_ptr().write(x);
+ boxed.assume_init()
+ }
+ }
+
+ /// Allocates memory in the given allocator then places `x` into it,
+ /// returning an error if the allocation fails
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let five = Box::try_new_in(5, System)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
+ where
+ T: ~const Destruct,
+ A: ~const Allocator + ~const Destruct,
+ {
+ let mut boxed = Self::try_new_uninit_in(alloc)?;
+ unsafe {
+ boxed.as_mut_ptr().write(x);
+ Ok(boxed.assume_init())
+ }
+ }
+
+ /// Constructs a new box with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Box::<u32, _>::new_uninit_in(System);
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ // NOTE: Prefer `match` over `unwrap_or_else` since the closure is sometimes
+ // not inlineable, which would make the code size bigger.
+ match Box::try_new_uninit_in(alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(layout),
+ }
+ }
+
+ /// Constructs a new box with uninitialized contents in the provided allocator,
+ /// returning an error if the allocation fails
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Box::<u32, _>::try_new_uninit_in(System)?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ let ptr = alloc.allocate(layout)?.cast();
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes in the provided allocator.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let zero = Box::<u32, _>::new_zeroed_in(System);
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[cfg(not(no_global_oom_handling))]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ // NOTE: Prefer `match` over `unwrap_or_else` since the closure is sometimes
+ // not inlineable, which would make the code size bigger.
+ match Box::try_new_zeroed_in(alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(layout),
+ }
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes in the provided allocator,
+ /// returning an error if the allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let zero = Box::<u32, _>::try_new_zeroed_in(System)?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ let ptr = alloc.allocate_zeroed(layout)?.cast();
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+ /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement `Unpin`, then
+ /// `x` will be pinned in memory and unable to be moved.
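+ ///
+ /// # Examples
+ ///
+ /// A minimal illustrative example using the `System` allocator:
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let pinned = Box::pin_in(5, System);
+ /// assert_eq!(*pinned, 5);
+ /// ```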
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[must_use]
+ #[inline(always)]
+ pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
+ where
+ A: 'static + ~const Allocator + ~const Destruct,
+ {
+ Self::into_pin(Self::new_in(x, alloc))
+ }
+
+ /// Converts a `Box<T>` into a `Box<[T]>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
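+ ///
+ /// # Examples
+ ///
+ /// An illustrative example:
+ ///
+ /// ```
+ /// #![feature(box_into_boxed_slice)]
+ ///
+ /// let boxed: Box<u8> = Box::new(1);
+ /// let slice: Box<[u8]> = Box::into_boxed_slice(boxed);
+ ///
+ /// assert_eq!(*slice, [1]);
+ /// ```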
+ #[unstable(feature = "box_into_boxed_slice", issue = "71582")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ pub const fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(boxed);
+ unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
+ }
+
+ /// Consumes the `Box`, returning the wrapped value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(box_into_inner)]
+ ///
+ /// let c = Box::new(5);
+ ///
+ /// assert_eq!(Box::into_inner(c), 5);
+ /// ```
+ #[unstable(feature = "box_into_inner", issue = "80437")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn into_inner(boxed: Self) -> T
+ where
+ Self: ~const Destruct,
+ {
+ *boxed
+ }
+}
+
+impl<T> Box<[T]> {
+ /// Constructs a new boxed slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ unsafe { RawVec::with_capacity(len).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let values = Box::<[u32]>::new_zeroed_slice(3);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ unsafe { RawVec::with_capacity_zeroed(len).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents. Returns an error if
+ /// the allocation fails
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::try_new_uninit_slice(3)?;
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3]);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
+ unsafe {
+ let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
+ Ok(l) => l,
+ Err(_) => return Err(AllocError),
+ };
+ let ptr = Global.allocate(layout)?;
+ Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
+ }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents, with the memory
+ /// being filled with `0` bytes. Returns an error if the allocation fails
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let values = Box::<[u32]>::try_new_zeroed_slice(3)?;
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0]);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
+ unsafe {
+ let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
+ Ok(l) => l,
+ Err(_) => return Err(AllocError),
+ };
+ let ptr = Global.allocate_zeroed(layout)?;
+ Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
+ }
+ }
+}
+
+impl<T, A: Allocator> Box<[T], A> {
+ /// Constructs a new boxed slice with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut values = Box::<[u32], _>::new_uninit_slice_in(3, System);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
+ unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents in the provided allocator,
+ /// with the memory being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let values = Box::<[u32], _>::new_zeroed_slice_in(3, System);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
+ unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) }
+ }
+}
+
+impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
+ /// Converts to `Box<T, A>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::new_uninit();
+ ///
+ /// let five: Box<u32> = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const unsafe fn assume_init(self) -> Box<T, A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(self);
+ unsafe { Box::from_raw_in(raw as *mut T, alloc) }
+ }
+
+ /// Writes the value and converts to `Box<T, A>`.
+ ///
+ /// This method converts the box similarly to [`Box::assume_init`] but
+ /// writes `value` into it before the conversion, thus guaranteeing safety.
+ /// In some scenarios, use of this method may improve performance because
+ /// the compiler may be able to optimize copying from the stack.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let big_box = Box::<[usize; 1024]>::new_uninit();
+ ///
+ /// let mut array = [0; 1024];
+ /// for (i, place) in array.iter_mut().enumerate() {
+ /// *place = i;
+ /// }
+ ///
+ /// // The optimizer may be able to elide this copy, so the previous code
+ /// // writes to the heap directly.
+ /// let big_box = Box::write(big_box, array);
+ ///
+ /// for (i, x) in big_box.iter().enumerate() {
+ /// assert_eq!(*x, i);
+ /// }
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn write(mut boxed: Self, value: T) -> Box<T, A> {
+ unsafe {
+ (*boxed).write(value);
+ boxed.assume_init()
+ }
+ }
+}
+
+impl<T, A: Allocator> Box<[mem::MaybeUninit<T>], A> {
+ /// Converts to `Box<[T], A>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the values
+ /// really are in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Box<[T], A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(self);
+ unsafe { Box::from_raw_in(raw as *mut [T], alloc) }
+ }
+}
+
+impl<T: ?Sized> Box<T> {
+ /// Constructs a box from a raw pointer.
+ ///
+ /// After calling this function, the raw pointer is owned by the
+ /// resulting `Box`. Specifically, the `Box` destructor will call
+ /// the destructor of `T` and free the allocated memory. For this
+ /// to be safe, the memory must have been allocated in accordance
+ /// with the [memory layout] used by `Box`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because improper use may lead to
+ /// memory problems. For example, a double-free may occur if the
+ /// function is called twice on the same raw pointer.
+ ///
+ /// The safety conditions are described in the [memory layout] section.
+ ///
+ /// # Examples
+ ///
+ /// Recreate a `Box` which was previously converted to a raw pointer
+ /// using [`Box::into_raw`]:
+ /// ```
+ /// let x = Box::new(5);
+ /// let ptr = Box::into_raw(x);
+ /// let x = unsafe { Box::from_raw(ptr) };
+ /// ```
+ /// Manually create a `Box` from scratch by using the global allocator:
+ /// ```
+ /// use std::alloc::{alloc, Layout};
+ ///
+ /// unsafe {
+ /// let ptr = alloc(Layout::new::<i32>()) as *mut i32;
+ /// // In general .write is required to avoid attempting to destruct
+ /// // the (uninitialized) previous contents of `ptr`, though for this
+ /// // simple example `*ptr = 5` would have worked as well.
+ /// ptr.write(5);
+ /// let x = Box::from_raw(ptr);
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ /// [`Layout`]: crate::Layout
+ #[stable(feature = "box_raw", since = "1.4.0")]
+ #[inline]
+ pub unsafe fn from_raw(raw: *mut T) -> Self {
+ unsafe { Self::from_raw_in(raw, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Box<T, A> {
+ /// Constructs a box from a raw pointer in the given allocator.
+ ///
+ /// After calling this function, the raw pointer is owned by the
+ /// resulting `Box`. Specifically, the `Box` destructor will call
+ /// the destructor of `T` and free the allocated memory. For this
+ /// to be safe, the memory must have been allocated in accordance
+ /// with the [memory layout] used by `Box`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because improper use may lead to
+ /// memory problems. For example, a double-free may occur if the
+ /// function is called twice on the same raw pointer.
+ ///
+ /// # Examples
+ ///
+ /// Recreate a `Box` which was previously converted to a raw pointer
+ /// using [`Box::into_raw_with_allocator`]:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let x = Box::new_in(5, System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
+ /// ```
+ /// Manually create a `Box` from scratch by using the system allocator:
+ /// ```
+ /// #![feature(allocator_api, slice_ptr_get)]
+ ///
+ /// use std::alloc::{Allocator, Layout, System};
+ ///
+ /// unsafe {
+ /// let ptr = System.allocate(Layout::new::<i32>())?.as_mut_ptr() as *mut i32;
+ /// // In general .write is required to avoid attempting to destruct
+ /// // the (uninitialized) previous contents of `ptr`, though for this
+ /// // simple example `*ptr = 5` would have worked as well.
+ /// ptr.write(5);
+ /// let x = Box::from_raw_in(ptr, System);
+ /// }
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ /// [`Layout`]: crate::Layout
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const unsafe fn from_raw_in(raw: *mut T, alloc: A) -> Self {
+ Box(unsafe { Unique::new_unchecked(raw) }, alloc)
+ }
+
+ /// Consumes the `Box`, returning a wrapped raw pointer.
+ ///
+ /// The pointer will be properly aligned and non-null.
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Box`. In particular, the
+ /// caller should properly destroy `T` and release the memory, taking
+ /// into account the [memory layout] used by `Box`. The easiest way to
+ /// do this is to convert the raw pointer back into a `Box` with the
+ /// [`Box::from_raw`] function, allowing the `Box` destructor to perform
+ /// the cleanup.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ /// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
+ /// for automatic cleanup:
+ /// ```
+ /// let x = Box::new(String::from("Hello"));
+ /// let ptr = Box::into_raw(x);
+ /// let x = unsafe { Box::from_raw(ptr) };
+ /// ```
+ /// Manual cleanup by explicitly running the destructor and deallocating
+ /// the memory:
+ /// ```
+ /// use std::alloc::{dealloc, Layout};
+ /// use std::ptr;
+ ///
+ /// let x = Box::new(String::from("Hello"));
+ /// let p = Box::into_raw(x);
+ /// unsafe {
+ /// ptr::drop_in_place(p);
+ /// dealloc(p as *mut u8, Layout::new::<String>());
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ #[stable(feature = "box_raw", since = "1.4.0")]
+ #[inline]
+ pub fn into_raw(b: Self) -> *mut T {
+ Self::into_raw_with_allocator(b).0
+ }
+
+ /// Consumes the `Box`, returning a wrapped raw pointer and the allocator.
+ ///
+ /// The pointer will be properly aligned and non-null.
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Box`. In particular, the
+ /// caller should properly destroy `T` and release the memory, taking
+ /// into account the [memory layout] used by `Box`. The easiest way to
+ /// do this is to convert the raw pointer back into a `Box` with the
+ /// [`Box::from_raw_in`] function, allowing the `Box` destructor to perform
+ /// the cleanup.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw_with_allocator(b)` instead of `b.into_raw_with_allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ /// Converting the raw pointer back into a `Box` with [`Box::from_raw_in`]
+ /// for automatic cleanup:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let x = Box::new_in(String::from("Hello"), System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
+ /// ```
+ /// Manual cleanup by explicitly running the destructor and deallocating
+ /// the memory:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{Allocator, Layout, System};
+ /// use std::ptr::{self, NonNull};
+ ///
+ /// let x = Box::new_in(String::from("Hello"), System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// unsafe {
+ /// ptr::drop_in_place(ptr);
+ /// let non_null = NonNull::new_unchecked(ptr);
+ /// alloc.deallocate(non_null.cast(), Layout::new::<String>());
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
+ let (leaked, alloc) = Box::into_unique(b);
+ (leaked.as_ptr(), alloc)
+ }
+
+ #[unstable(
+ feature = "ptr_internals",
+ issue = "none",
+ reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
+ )]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ #[doc(hidden)]
+ pub const fn into_unique(b: Self) -> (Unique<T>, A) {
+ // Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
+ // raw pointer for the type system. Turning it directly into a raw pointer would not be
+ // recognized as "releasing" the unique pointer to permit aliased raw accesses,
+ // so all raw pointer methods have to go through `Box::leak`. Turning *that* to a raw pointer
+ // behaves correctly.
+ let alloc = unsafe { ptr::read(&b.1) };
+ (Unique::from(Box::leak(b)), alloc)
+ }
+
+ /// Returns a reference to the underlying allocator.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::allocator(&b)` instead of `b.allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
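+ ///
+ /// # Examples
+ ///
+ /// A minimal illustrative example with the `System` allocator:
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let b = Box::new_in(5, System);
+ /// let _alloc: &System = Box::allocator(&b);
+ /// ```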
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn allocator(b: &Self) -> &A {
+ &b.1
+ }
+
+ /// Consumes and leaks the `Box`, returning a mutable reference,
+ /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
+ /// `'a`. If the type has only static references, or none at all, then this
+ /// may be chosen to be `'static`.
+ ///
+ /// This function is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak. If this is not acceptable, the reference should first be wrapped
+ /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
+ /// then be dropped which will properly destroy `T` and release the
+ /// allocated memory.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::leak(b)` instead of `b.leak()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// let x = Box::new(41);
+ /// let static_ref: &'static mut usize = Box::leak(x);
+ /// *static_ref += 1;
+ /// assert_eq!(*static_ref, 42);
+ /// ```
+ ///
+ /// Unsized data:
+ ///
+ /// ```
+ /// let x = vec![1, 2, 3].into_boxed_slice();
+ /// let static_ref = Box::leak(x);
+ /// static_ref[0] = 4;
+ /// assert_eq!(*static_ref, [4, 2, 3]);
+ /// ```
+ #[stable(feature = "box_leak", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn leak<'a>(b: Self) -> &'a mut T
+ where
+ A: 'a,
+ {
+ unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() }
+ }
+
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// This is also available via [`From`].
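+ ///
+ /// # Examples
+ ///
+ /// An illustrative example:
+ ///
+ /// ```
+ /// #![feature(box_into_pin)]
+ ///
+ /// use std::pin::Pin;
+ ///
+ /// let boxed = Box::new(5);
+ /// let pinned: Pin<Box<i32>> = Box::into_pin(boxed);
+ /// assert_eq!(*pinned, 5);
+ /// ```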
+ #[unstable(feature = "box_into_pin", issue = "62370")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ pub const fn into_pin(boxed: Self) -> Pin<Self>
+ where
+ A: 'static,
+ {
+ // It's not possible to move or replace the insides of a `Pin<Box<T>>`
+ // when `T: !Unpin`, so it's safe to pin it directly without any
+ // additional requirements.
+ unsafe { Pin::new_unchecked(boxed) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
+ fn drop(&mut self) {
+ // FIXME: Do nothing, drop is currently performed by compiler.
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Box<T> {
+ /// Creates a `Box<T>` with the `Default` value for `T`.
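+ ///
+ /// # Examples
+ ///
+ /// An illustrative example:
+ ///
+ /// ```
+ /// let b: Box<u32> = Box::default();
+ /// assert_eq!(*b, 0);
+ /// ```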
+ fn default() -> Self {
+ box T::default()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for Box<[T]> {
+ fn default() -> Self {
+ let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling();
+ Box(ptr, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "default_box_extra", since = "1.17.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl const Default for Box<str> {
+ fn default() -> Self {
+ // SAFETY: This is the same as `Unique::cast<U>` but with an unsized `U = str`.
+ let ptr: Unique<str> = unsafe {
+ let bytes: Unique<[u8]> = Unique::<[u8; 0]>::dangling();
+ Unique::new_unchecked(bytes.as_ptr() as *mut str)
+ };
+ Box(ptr, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Box<T, A> {
+ /// Returns a new box with a `clone()` of this box's contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Box::new(5);
+ /// let y = x.clone();
+ ///
+ /// // The value is the same
+ /// assert_eq!(x, y);
+ ///
+ /// // But they are unique objects
+ /// assert_ne!(&*x as *const i32, &*y as *const i32);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Self {
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut boxed = Self::new_uninit_in(self.1.clone());
+ unsafe {
+ (**self).write_clone_into_raw(boxed.as_mut_ptr());
+ boxed.assume_init()
+ }
+ }
+
+ /// Copies `source`'s contents into `self` without creating a new allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Box::new(5);
+ /// let mut y = Box::new(10);
+ /// let yp: *const i32 = &*y;
+ ///
+ /// y.clone_from(&x);
+ ///
+ /// // The value is the same
+ /// assert_eq!(x, y);
+ ///
+ /// // And no allocation occurred
+ /// assert_eq!(yp, &*y);
+ /// ```
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ (**self).clone_from(&(**source));
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl Clone for Box<str> {
+ fn clone(&self) -> Self {
+ // this makes a copy of the data
+ let buf: Box<[u8]> = self.as_bytes().into();
+ unsafe { from_boxed_utf8_unchecked(buf) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Box<T, A> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ PartialEq::eq(&**self, &**other)
+ }
+ #[inline]
+ fn ne(&self, other: &Self) -> bool {
+ PartialEq::ne(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Box<T, A> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+ #[inline]
+ fn lt(&self, other: &Self) -> bool {
+ PartialOrd::lt(&**self, &**other)
+ }
+ #[inline]
+ fn le(&self, other: &Self) -> bool {
+ PartialOrd::le(&**self, &**other)
+ }
+ #[inline]
+ fn ge(&self, other: &Self) -> bool {
+ PartialOrd::ge(&**self, &**other)
+ }
+ #[inline]
+ fn gt(&self, other: &Self) -> bool {
+ PartialOrd::gt(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord, A: Allocator> Ord for Box<T, A> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq, A: Allocator> Eq for Box<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash, A: Allocator> Hash for Box<T, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state);
+ }
+}
+
+#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
+impl<T: ?Sized + Hasher, A: Allocator> Hasher for Box<T, A> {
+ fn finish(&self) -> u64 {
+ (**self).finish()
+ }
+ fn write(&mut self, bytes: &[u8]) {
+ (**self).write(bytes)
+ }
+ fn write_u8(&mut self, i: u8) {
+ (**self).write_u8(i)
+ }
+ fn write_u16(&mut self, i: u16) {
+ (**self).write_u16(i)
+ }
+ fn write_u32(&mut self, i: u32) {
+ (**self).write_u32(i)
+ }
+ fn write_u64(&mut self, i: u64) {
+ (**self).write_u64(i)
+ }
+ fn write_u128(&mut self, i: u128) {
+ (**self).write_u128(i)
+ }
+ fn write_usize(&mut self, i: usize) {
+ (**self).write_usize(i)
+ }
+ fn write_i8(&mut self, i: i8) {
+ (**self).write_i8(i)
+ }
+ fn write_i16(&mut self, i: i16) {
+ (**self).write_i16(i)
+ }
+ fn write_i32(&mut self, i: i32) {
+ (**self).write_i32(i)
+ }
+ fn write_i64(&mut self, i: i64) {
+ (**self).write_i64(i)
+ }
+ fn write_i128(&mut self, i: i128) {
+ (**self).write_i128(i)
+ }
+ fn write_isize(&mut self, i: isize) {
+ (**self).write_isize(i)
+ }
+ fn write_length_prefix(&mut self, len: usize) {
+ (**self).write_length_prefix(len)
+ }
+ fn write_str(&mut self, s: &str) {
+ (**self).write_str(s)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Box<T> {
+ /// Converts a `T` into a `Box<T>`
+ ///
+ /// The conversion allocates on the heap and moves `t`
+ /// from the stack into it.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let x = 5;
+ /// let boxed = Box::new(5);
+ ///
+ /// assert_eq!(Box::from(x), boxed);
+ /// ```
+ fn from(t: T) -> Self {
+ Box::new(t)
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+impl<T: ?Sized, A: Allocator> const From<Box<T, A>> for Pin<Box<T, A>>
+where
+ A: 'static,
+{
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ fn from(boxed: Box<T, A>) -> Self {
+ Box::into_pin(boxed)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_slice", since = "1.17.0")]
+impl<T: Copy> From<&[T]> for Box<[T]> {
+ /// Converts a `&[T]` into a `Box<[T]>`
+ ///
+ /// This conversion allocates on the heap
+ /// and performs a copy of `slice`.
+ ///
+ /// # Examples
+ /// ```rust
+ /// // create a &[u8] which will be used to create a Box<[u8]>
+ /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+ /// let boxed_slice: Box<[u8]> = Box::from(slice);
+ ///
+ /// println!("{boxed_slice:?}");
+ /// ```
+ fn from(slice: &[T]) -> Box<[T]> {
+ let len = slice.len();
+ let buf = RawVec::with_capacity(len);
+ unsafe {
+ ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
+ buf.into_box(slice.len()).assume_init()
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl<T: Copy> From<Cow<'_, [T]>> for Box<[T]> {
+ /// Converts a `Cow<'_, [T]>` into a `Box<[T]>`
+ ///
+ /// When `cow` is the `Cow::Borrowed` variant, this
+ /// conversion allocates on the heap and copies the
+ /// underlying slice. Otherwise, it will try to reuse the owned
+ /// `Vec`'s allocation.
+ #[inline]
+ fn from(cow: Cow<'_, [T]>) -> Box<[T]> {
+ match cow {
+ Cow::Borrowed(slice) => Box::from(slice),
+ Cow::Owned(slice) => Box::from(slice),
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_slice", since = "1.17.0")]
+impl From<&str> for Box<str> {
+ /// Converts a `&str` into a `Box<str>`
+ ///
+ /// This conversion allocates on the heap
+ /// and performs a copy of `s`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let boxed: Box<str> = Box::from("hello");
+ /// println!("{boxed}");
+ /// ```
+ #[inline]
+ fn from(s: &str) -> Box<str> {
+ unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl From<Cow<'_, str>> for Box<str> {
+ /// Converts a `Cow<'_, str>` into a `Box<str>`
+ ///
+ /// When `cow` is the `Cow::Borrowed` variant, this
+ /// conversion allocates on the heap and copies the
+ /// underlying `str`. Otherwise, it will try to reuse the owned
+ /// `String`'s allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::borrow::Cow;
+ ///
+ /// let unboxed = Cow::Borrowed("hello");
+ /// let boxed: Box<str> = Box::from(unboxed);
+ /// println!("{boxed}");
+ /// ```
+ ///
+ /// ```rust
+ /// # use std::borrow::Cow;
+ /// let unboxed = Cow::Owned("hello".to_string());
+ /// let boxed: Box<str> = Box::from(unboxed);
+ /// println!("{boxed}");
+ /// ```
+ #[inline]
+ fn from(cow: Cow<'_, str>) -> Box<str> {
+ match cow {
+ Cow::Borrowed(s) => Box::from(s),
+ Cow::Owned(s) => Box::from(s),
+ }
+ }
+}
+
+#[stable(feature = "boxed_str_conv", since = "1.19.0")]
+impl<A: Allocator> From<Box<str, A>> for Box<[u8], A> {
+ /// Converts a `Box<str>` into a `Box<[u8]>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// # Examples
+ /// ```rust
+ /// // create a Box<str> which will be used to create a Box<[u8]>
+ /// let boxed: Box<str> = Box::from("hello");
+ /// let boxed_str: Box<[u8]> = Box::from(boxed);
+ ///
+ /// // create a &[u8] which will be used to create a Box<[u8]>
+ /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+ /// let boxed_slice = Box::from(slice);
+ ///
+ /// assert_eq!(boxed_slice, boxed_str);
+ /// ```
+ #[inline]
+ fn from(s: Box<str, A>) -> Self {
+ let (raw, alloc) = Box::into_raw_with_allocator(s);
+ unsafe { Box::from_raw_in(raw as *mut [u8], alloc) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_array", since = "1.45.0")]
+impl<T, const N: usize> From<[T; N]> for Box<[T]> {
+ /// Converts a `[T; N]` into a `Box<[T]>`
+ ///
+ /// This conversion moves the array to newly heap-allocated memory.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let boxed: Box<[u8]> = Box::from([4, 2]);
+ /// println!("{boxed:?}");
+ /// ```
+ fn from(array: [T; N]) -> Box<[T]> {
+ box array
+ }
+}
+
+#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
+impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
+ type Error = Box<[T]>;
+
+ /// Attempts to convert a `Box<[T]>` into a `Box<[T; N]>`.
+ ///
+ /// The conversion occurs in-place and does not require a
+ /// new memory allocation.
+ ///
+ /// # Errors
+ ///
+ /// Returns the old `Box<[T]>` in the `Err` variant if
+ /// `boxed_slice.len()` does not equal `N`.
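+ ///
+ /// # Examples
+ ///
+ /// An illustrative example:
+ ///
+ /// ```
+ /// use std::convert::TryFrom;
+ ///
+ /// let slice: Box<[i32]> = Box::from([1, 2, 3]);
+ /// let array: Box<[i32; 3]> = Box::try_from(slice).unwrap();
+ ///
+ /// assert_eq!(*array, [1, 2, 3]);
+ /// ```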
+ fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
+ if boxed_slice.len() == N {
+ Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) })
+ } else {
+ Err(boxed_slice)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline]
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut dyn Any, _) = Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any + Send, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any + Send>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any + Send> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline]
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut (dyn Any + Send), _) = Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any + Send + Sync, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any + Send + Sync>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline]
+ #[stable(feature = "box_send_sync_any_downcast", since = "1.51.0")]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any + Send + Sync> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline]
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut (dyn Any + Send + Sync), _) =
+ Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Display + ?Sized, A: Allocator> fmt::Display for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug + ?Sized, A: Allocator> fmt::Debug for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // It's not possible to extract the inner Unique directly from the Box;
+ // instead we cast it to a `*const`, which aliases the Unique.
+ let ptr: *const T = &**self;
+ fmt::Pointer::fmt(&ptr, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+impl<T: ?Sized, A: Allocator> const DerefMut for Box<T, A> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized, A: Allocator> Receiver for Box<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator + ?Sized, A: Allocator> Iterator for Box<I, A> {
+ type Item = I::Item;
+ fn next(&mut self) -> Option<I::Item> {
+ (**self).next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth(n)
+ }
+ fn last(self) -> Option<I::Item> {
+ BoxIter::last(self)
+ }
+}
+
+trait BoxIter {
+ type Item;
+ fn last(self) -> Option<Self::Item>;
+}
+
+impl<I: Iterator + ?Sized, A: Allocator> BoxIter for Box<I, A> {
+ type Item = I::Item;
+ default fn last(self) -> Option<I::Item> {
+ #[inline]
+ fn some<T>(_: Option<T>, x: T) -> Option<T> {
+ Some(x)
+ }
+
+ self.fold(None, some)
+ }
+}
+
+/// Specialization for sized `I`s that uses `I`'s implementation of `last()`
+/// instead of the default.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, A: Allocator> BoxIter for Box<I, A> {
+ fn last(self) -> Option<I::Item> {
+ (*self).last()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: DoubleEndedIterator + ?Sized, A: Allocator> DoubleEndedIterator for Box<I, A> {
+ fn next_back(&mut self) -> Option<I::Item> {
+ (**self).next_back()
+ }
+ fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth_back(n)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A> {
+ fn len(&self) -> usize {
+ (**self).len()
+ }
+ fn is_empty(&self) -> bool {
+ (**self).is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
+ type Output = <F as FnOnce<Args>>::Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
+ <F as FnOnce<Args>>::call_once(*self, args)
+ }
+}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
+ <F as FnMut<Args>>::call_mut(self, args)
+ }
+}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output {
+ <F as Fn<Args>>::call(self, args)
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T, Global> {}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "boxed_slice_from_iter", since = "1.32.0")]
+impl<I> FromIterator<I> for Box<[I]> {
+ fn from_iter<T: IntoIterator<Item = I>>(iter: T) -> Self {
+ iter.into_iter().collect::<Vec<_>>().into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Box<[T], A> {
+ fn clone(&self) -> Self {
+ let alloc = Box::allocator(self).clone();
+ self.to_vec_in(alloc).into_boxed_slice()
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ if self.len() == other.len() {
+ self.clone_from_slice(&other);
+ } else {
+ *self = other.clone();
+ }
+ }
+}
+
+#[stable(feature = "box_borrow", since = "1.1.0")]
+impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Box<T, A> {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "box_borrow", since = "1.1.0")]
+impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for Box<T, A> {
+ fn borrow_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized, A: Allocator> AsRef<T> for Box<T, A> {
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
+ fn as_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+/* Nota bene
+ *
+ * We could have chosen not to add this impl, and instead have written a
+ * function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
+ * because Box<T> implements Unpin even when T does not, as a result of
+ * this impl.
+ *
+ * We chose this API instead of the alternative for a few reasons:
+ * - Logically, it is helpful to understand pinning in regard to the
+ * memory region being pointed to. For this reason none of the
+ * standard library pointer types support projecting through a pin
+ * (Box<T> is the only pointer type in std for which this would be
+ * safe.)
+ * - It is in practice very useful to have Box<T> be unconditionally
+ * Unpin because of trait objects, for which the structural auto
+ * trait functionality does not apply (e.g., Box<dyn Foo> would
+ * otherwise not be Unpin).
+ *
+ * Another type with the same semantics as Box but only a conditional
+ * implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
+ * could have a method to project a Pin<T> from it.
+ */
+#[stable(feature = "pin", since = "1.33.0")]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+impl<T: ?Sized, A: Allocator> const Unpin for Box<T, A> where A: 'static {}
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
+where
+ A: 'static,
+{
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume(Pin::new(&mut *self), arg)
+ }
+}
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator<R>, R, A: Allocator> Generator<R> for Pin<Box<G, A>>
+where
+ A: 'static,
+{
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume((*self).as_mut(), arg)
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<F: ?Sized + Future + Unpin, A: Allocator> Future for Box<F, A>
+where
+ A: 'static,
+{
+ type Output = F::Output;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ F::poll(Pin::new(&mut *self), cx)
+ }
+}
+
+#[unstable(feature = "async_iterator", issue = "79024")]
+impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> {
+ type Item = S::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Pin::new(&mut **self).poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+}
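
The forwarding impls above are what let a boxed trait object behave like the value it wraps: a `Box<dyn Iterator>` iterates, a `Box<dyn FnOnce>` can be called, and `Box<T>` is `Unpin` regardless of `T` (the "Nota bene" block explains why). A minimal userspace sketch of that behaviour, illustrative only and not part of this patch:

    fn assert_unpin<T: Unpin>() {}

    fn main() {
        // Iterator and FnOnce forwarding through the Box.
        let it: Box<dyn Iterator<Item = u32>> = Box::new(0..3);
        assert_eq!(it.last(), Some(2));

        let f: Box<dyn FnOnce() -> u32> = Box::new(|| 42);
        assert_eq!(f(), 42);

        // Box<T> is Unpin even when T is not (PhantomPinned is !Unpin).
        assert_unpin::<Box<core::marker::PhantomPinned>>();
    }
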
diff --git a/rust/alloc/boxed/thin.rs b/rust/alloc/boxed/thin.rs
new file mode 100644
index 000000000000..9135203114fc
--- /dev/null
+++ b/rust/alloc/boxed/thin.rs
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Based on
+// https://github.com/matthieu-m/rfc2580/blob/b58d1d3cba0d4b5e859d3617ea2d0943aaa31329/examples/thin.rs
+// by matthieu-m
+use crate::alloc::{self, Layout, LayoutError};
+use core::fmt::{self, Debug, Display, Formatter};
+use core::marker::PhantomData;
+#[cfg(not(no_global_oom_handling))]
+use core::marker::Unsize;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+use core::ptr::Pointee;
+use core::ptr::{self, NonNull};
+
+/// ThinBox.
+///
+/// A thin pointer for heap allocation, regardless of T.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(thin_box)]
+/// use std::boxed::ThinBox;
+///
+/// let five = ThinBox::new(5);
+/// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
+///
+/// use std::mem::{size_of, size_of_val};
+/// let size_of_ptr = size_of::<*const ()>();
+/// assert_eq!(size_of_ptr, size_of_val(&five));
+/// assert_eq!(size_of_ptr, size_of_val(&thin_slice));
+/// ```
+#[unstable(feature = "thin_box", issue = "92791")]
+pub struct ThinBox<T: ?Sized> {
+ ptr: WithHeader<<T as Pointee>::Metadata>,
+ _marker: PhantomData<T>,
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T> ThinBox<T> {
+ /// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on
+ /// the stack.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(thin_box)]
+ /// use std::boxed::ThinBox;
+ ///
+ /// let five = ThinBox::new(5);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ pub fn new(value: T) -> Self {
+ let meta = ptr::metadata(&value);
+ let ptr = WithHeader::new(meta, value);
+ ThinBox { ptr, _marker: PhantomData }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<Dyn: ?Sized> ThinBox<Dyn> {
+ /// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on
+ /// the stack.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(thin_box)]
+ /// use std::boxed::ThinBox;
+ ///
+ /// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ pub fn new_unsize<T>(value: T) -> Self
+ where
+ T: Unsize<Dyn>,
+ {
+ let meta = ptr::metadata(&value as &Dyn);
+ let ptr = WithHeader::new(meta, value);
+ ThinBox { ptr, _marker: PhantomData }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized + Debug> Debug for ThinBox<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Debug::fmt(self.deref(), f)
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized + Display> Display for ThinBox<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Display::fmt(self.deref(), f)
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized> Deref for ThinBox<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ let value = self.data();
+ let metadata = self.meta();
+ let pointer = ptr::from_raw_parts(value as *const (), metadata);
+ unsafe { &*pointer }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized> DerefMut for ThinBox<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ let value = self.data();
+ let metadata = self.meta();
+ let pointer = ptr::from_raw_parts_mut::<T>(value as *mut (), metadata);
+ unsafe { &mut *pointer }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized> Drop for ThinBox<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let value = self.deref_mut();
+ let value = value as *mut T;
+ self.ptr.drop::<T>(value);
+ }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized> ThinBox<T> {
+ fn meta(&self) -> <T as Pointee>::Metadata {
+ // Safety:
+ // - NonNull and valid.
+ unsafe { *self.ptr.header() }
+ }
+
+ fn data(&self) -> *mut u8 {
+ self.ptr.value()
+ }
+}
+
+/// A pointer to type-erased data, guaranteed to have a header `H` before the pointed-to location.
+struct WithHeader<H>(NonNull<u8>, PhantomData<H>);
+
+impl<H> WithHeader<H> {
+ #[cfg(not(no_global_oom_handling))]
+ fn new<T>(header: H, value: T) -> WithHeader<H> {
+ let value_layout = Layout::new::<T>();
+ let Ok((layout, value_offset)) = Self::alloc_layout(value_layout) else {
+ // We pass an empty layout here because we do not know which layout caused the
+ // arithmetic overflow in `Layout::extend` and `handle_alloc_error` takes `Layout` as
+ // its argument rather than `Result<Layout, LayoutError>`, also this function has been
+ // stable since 1.28 ._.
+ //
+ // On the other hand, look at this gorgeous turbofish!
+ alloc::handle_alloc_error(Layout::new::<()>());
+ };
+
+ unsafe {
+ let ptr = alloc::alloc(layout);
+
+ if ptr.is_null() {
+ alloc::handle_alloc_error(layout);
+ }
+ // Safety:
+ // - The size is at least `aligned_header_size`.
+ let ptr = ptr.add(value_offset) as *mut _;
+
+ let ptr = NonNull::new_unchecked(ptr);
+
+ let result = WithHeader(ptr, PhantomData);
+ ptr::write(result.header(), header);
+ ptr::write(result.value().cast(), value);
+
+ result
+ }
+ }
+
+ // Safety:
+ // - Assumes that `value` can be dereferenced.
+ unsafe fn drop<T: ?Sized>(&self, value: *mut T) {
+ unsafe {
+ // SAFETY: Layout must have been computable if we're in drop
+ let (layout, value_offset) =
+ Self::alloc_layout(Layout::for_value_raw(value)).unwrap_unchecked();
+
+ ptr::drop_in_place::<T>(value);
+ // We only drop the value because the Pointee trait requires that the metadata is copy
+ // aka trivially droppable
+ alloc::dealloc(self.0.as_ptr().sub(value_offset), layout);
+ }
+ }
+
+ fn header(&self) -> *mut H {
+ // Safety:
+ // - At least `size_of::<H>()` bytes are allocated ahead of the pointer.
+ // - We know that H will be aligned because the middle pointer is aligned to the greater
+ // of the alignment of the header and the data and the header size includes the padding
+ // needed to align the header. Subtracting the header size from the aligned data pointer
+ // will always result in an aligned header pointer, it just may not point to the
+ // beginning of the allocation.
+ unsafe { self.0.as_ptr().sub(Self::header_size()) as *mut H }
+ }
+
+ fn value(&self) -> *mut u8 {
+ self.0.as_ptr()
+ }
+
+ const fn header_size() -> usize {
+ mem::size_of::<H>()
+ }
+
+ fn alloc_layout(value_layout: Layout) -> Result<(Layout, usize), LayoutError> {
+ Layout::new::<H>().extend(value_layout)
+ }
+}
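
`WithHeader` stores the pointee metadata directly in front of the value, which is how a `ThinBox<dyn Trait>` stays one pointer wide. The arithmetic in `alloc_layout()` and `header()` boils down to `Layout::extend` plus a fixed back-offset of `size_of::<H>()`; a small sketch with std's `Layout`, illustrative only and not code from this file:

    use std::alloc::Layout;
    use std::mem::size_of;

    fn main() {
        // Header = the metadata (e.g. a slice length), value = the payload.
        let header = Layout::new::<usize>();
        let value = Layout::new::<[u8; 5]>();
        let (full, value_offset) = header.extend(value).unwrap();

        // The public pointer aims at the value; the header sits just before it,
        // which is why `header()` subtracts `size_of::<H>()` from that pointer.
        assert_eq!(value_offset, size_of::<usize>());
        assert!(full.size() >= value_offset + value.size());
    }
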
diff --git a/rust/alloc/collections/mod.rs b/rust/alloc/collections/mod.rs
new file mode 100644
index 000000000000..1eec265b28f8
--- /dev/null
+++ b/rust/alloc/collections/mod.rs
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Collection types.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+pub mod binary_heap;
+#[cfg(not(no_global_oom_handling))]
+mod btree;
+#[cfg(not(no_global_oom_handling))]
+pub mod linked_list;
+#[cfg(not(no_global_oom_handling))]
+pub mod vec_deque;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod btree_map {
+ //! An ordered map based on a B-Tree.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::btree::map::*;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod btree_set {
+ //! An ordered set based on a B-Tree.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::btree::set::*;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use binary_heap::BinaryHeap;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use btree_map::BTreeMap;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use btree_set::BTreeSet;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use linked_list::LinkedList;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use vec_deque::VecDeque;
+
+use crate::alloc::{Layout, LayoutError};
+use core::fmt::Display;
+
+/// The error type for `try_reserve` methods.
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "try_reserve", since = "1.57.0")]
+pub struct TryReserveError {
+ kind: TryReserveErrorKind,
+}
+
+impl TryReserveError {
+ /// Details about the allocation that caused the error
+ #[inline]
+ #[must_use]
+ #[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+ )]
+ pub fn kind(&self) -> TryReserveErrorKind {
+ self.kind.clone()
+ }
+}
+
+/// Details of the allocation that caused a `TryReserveError`
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+pub enum TryReserveErrorKind {
+ /// Error due to the computed capacity exceeding the collection's maximum
+ /// (usually `isize::MAX` bytes).
+ CapacityOverflow,
+
+ /// The memory allocator returned an error
+ AllocError {
+ /// The layout of the allocation request that failed
+ layout: Layout,
+
+ #[doc(hidden)]
+ #[unstable(
+ feature = "container_error_extra",
+ issue = "none",
+ reason = "\
+ Enable exposing the allocator’s custom error value \
+ if an associated type is added in the future: \
+ https://github.com/rust-lang/wg-allocators/issues/23"
+ )]
+ non_exhaustive: (),
+ },
+}
+
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+impl From<TryReserveErrorKind> for TryReserveError {
+ #[inline]
+ fn from(kind: TryReserveErrorKind) -> Self {
+ Self { kind }
+ }
+}
+
+#[unstable(feature = "try_reserve_kind", reason = "new API", issue = "48043")]
+impl From<LayoutError> for TryReserveErrorKind {
+ /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
+ #[inline]
+ fn from(_: LayoutError) -> Self {
+ TryReserveErrorKind::CapacityOverflow
+ }
+}
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl Display for TryReserveError {
+ fn fmt(
+ &self,
+ fmt: &mut core::fmt::Formatter<'_>,
+ ) -> core::result::Result<(), core::fmt::Error> {
+ fmt.write_str("memory allocation failed")?;
+ let reason = match self.kind {
+ TryReserveErrorKind::CapacityOverflow => {
+ " because the computed capacity exceeded the collection's maximum"
+ }
+ TryReserveErrorKind::AllocError { .. } => {
+ " because the memory allocator returned a error"
+ }
+ };
+ fmt.write_str(reason)
+ }
+}
+
+/// An intermediate trait for specialization of `Extend`.
+#[doc(hidden)]
+trait SpecExtend<I: IntoIterator> {
+ /// Extends `self` with the contents of the given iterator.
+ fn spec_extend(&mut self, iter: I);
+}
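
`TryReserveError` is the error type behind the `try_reserve` family of methods, which is how callers avoid the infallible, aborting allocation paths. A hedged userspace sketch of the pattern (the helper name is made up for illustration):

    use std::collections::TryReserveError;

    // Push a byte only if the extra capacity can actually be allocated;
    // `try_reserve` reports failure instead of aborting the process.
    fn push_checked(buf: &mut Vec<u8>, byte: u8) -> Result<(), TryReserveError> {
        buf.try_reserve(1)?;
        buf.push(byte); // capacity is already reserved, so this cannot allocate
        Ok(())
    }

    fn main() {
        let mut buf = Vec::new();
        push_checked(&mut buf, b'x').unwrap();
        assert_eq!(buf, vec![b'x']);
    }
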
diff --git a/rust/alloc/ffi/c_str.rs b/rust/alloc/ffi/c_str.rs
new file mode 100644
index 000000000000..5e2f4073771a
--- /dev/null
+++ b/rust/alloc/ffi/c_str.rs
@@ -0,0 +1,1203 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#[cfg(test)]
+mod tests;
+
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::rc::Rc;
+use crate::slice::hack::into_vec;
+use crate::string::String;
+use crate::vec::Vec;
+use core::borrow::Borrow;
+use core::ffi::{c_char, CStr};
+use core::fmt;
+use core::mem;
+use core::num::NonZeroU8;
+use core::ops;
+use core::ptr;
+use core::slice;
+use core::slice::memchr;
+use core::str::{self, Utf8Error};
+
+#[cfg(target_has_atomic = "ptr")]
+use crate::sync::Arc;
+
+/// A type representing an owned, C-compatible, nul-terminated string with no nul bytes in the
+/// middle.
+///
+/// This type serves the purpose of being able to safely generate a
+/// C-compatible string from a Rust byte slice or vector. An instance of this
+/// type is a static guarantee that the underlying bytes contain no interior 0
+/// bytes ("nul characters") and that the final byte is 0 ("nul terminator").
+///
+/// `CString` is to <code>&[CStr]</code> as [`String`] is to <code>&[str]</code>: the former
+/// in each pair are owned strings; the latter are borrowed
+/// references.
+///
+/// # Creating a `CString`
+///
+/// A `CString` is created from either a byte slice or a byte vector,
+/// or anything that implements <code>[Into]<[Vec]<[u8]>></code> (for
+/// example, you can build a `CString` straight out of a [`String`] or
+/// a <code>&[str]</code>, since both implement that trait).
+///
+/// The [`CString::new`] method will actually check that the provided <code>&[[u8]]</code>
+/// does not have 0 bytes in the middle, and return an error if it
+/// finds one.
+///
+/// # Extracting a raw pointer to the whole C string
+///
+/// `CString` implements an [`as_ptr`][`CStr::as_ptr`] method through the [`Deref`]
+/// trait. This method will give you a `*const c_char` which you can
+/// feed directly to extern functions that expect a nul-terminated
+/// string, like C's `strdup()`. Notice that [`as_ptr`][`CStr::as_ptr`] returns a
+/// read-only pointer; if the C code writes to it, that causes
+/// undefined behavior.
+///
+/// # Extracting a slice of the whole C string
+///
+/// Alternatively, you can obtain a <code>&[[u8]]</code> slice from a
+/// `CString` with the [`CString::as_bytes`] method. Slices produced in this
+/// way do *not* contain the trailing nul terminator. This is useful
+/// when you will be calling an extern function that takes a `*const
+/// u8` argument which is not necessarily nul-terminated, plus another
+/// argument with the length of the string — like C's `strndup()`.
+/// You can of course get the slice's length with its
+/// [`len`][slice::len] method.
+///
+/// If you need a <code>&[[u8]]</code> slice *with* the nul terminator, you
+/// can use [`CString::as_bytes_with_nul`] instead.
+///
+/// Once you have the kind of slice you need (with or without a nul
+/// terminator), you can call the slice's own
+/// [`as_ptr`][slice::as_ptr] method to get a read-only raw pointer to pass to
+/// extern functions. See the documentation for that function for a
+/// discussion on ensuring the lifetime of the raw pointer.
+///
+/// [str]: prim@str "str"
+/// [`Deref`]: ops::Deref
+///
+/// # Examples
+///
+/// ```ignore (extern-declaration)
+/// # fn main() {
+/// use std::ffi::CString;
+/// use std::os::raw::c_char;
+///
+/// extern "C" {
+/// fn my_printer(s: *const c_char);
+/// }
+///
+/// // We are certain that our string doesn't have 0 bytes in the middle,
+/// // so we can .expect()
+/// let c_to_print = CString::new("Hello, world!").expect("CString::new failed");
+/// unsafe {
+/// my_printer(c_to_print.as_ptr());
+/// }
+/// # }
+/// ```
+///
+/// # Safety
+///
+/// `CString` is intended for working with traditional C-style strings
+/// (a sequence of non-nul bytes terminated by a single nul byte); the
+/// primary use case for these kinds of strings is interoperating with C-like
+/// code. Often you will need to transfer ownership to/from that external
+/// code. It is strongly recommended that you thoroughly read through the
+/// documentation of `CString` before use, as improper ownership management
+/// of `CString` instances can lead to invalid memory accesses, memory leaks,
+/// and other memory errors.
+#[derive(PartialEq, PartialOrd, Eq, Ord, Hash, Clone)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "cstring_type")]
+#[unstable(feature = "alloc_c_string", issue = "94079")]
+pub struct CString {
+ // Invariant 1: the slice ends with a zero byte and has a length of at least one.
+ // Invariant 2: the slice contains only one zero byte.
+ // Improper usage of unsafe function can break Invariant 2, but not Invariant 1.
+ inner: Box<[u8]>,
+}
+
+/// An error indicating that an interior nul byte was found.
+///
+/// While Rust strings may contain nul bytes in the middle, C strings
+/// can't, as that byte would effectively truncate the string.
+///
+/// This error is created by the [`new`][`CString::new`] method on
+/// [`CString`]. See its documentation for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::ffi::{CString, NulError};
+///
+/// let _: NulError = CString::new(b"f\0oo".to_vec()).unwrap_err();
+/// ```
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(feature = "alloc_c_string", issue = "94079")]
+pub struct NulError(usize, Vec<u8>);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum FromBytesWithNulErrorKind {
+ InteriorNul(usize),
+ NotNulTerminated,
+}
+
+/// An error indicating that a nul byte was not in the expected position.
+///
+/// The vector used to create a [`CString`] must have one and only one nul byte,
+/// positioned at the end.
+///
+/// This error is created by the [`CString::from_vec_with_nul`] method.
+/// See its documentation for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::ffi::{CString, FromVecWithNulError};
+///
+/// let _: FromVecWithNulError = CString::from_vec_with_nul(b"f\0oo".to_vec()).unwrap_err();
+/// ```
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(feature = "alloc_c_string", issue = "94079")]
+pub struct FromVecWithNulError {
+ error_kind: FromBytesWithNulErrorKind,
+ bytes: Vec<u8>,
+}
+
+#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+impl FromVecWithNulError {
+ /// Returns a slice of the [`u8`] bytes that were attempted to be converted to a [`CString`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// // Some invalid bytes in a vector
+ /// let bytes = b"f\0oo".to_vec();
+ ///
+ /// let value = CString::from_vec_with_nul(bytes.clone());
+ ///
+ /// assert_eq!(&bytes[..], value.unwrap_err().as_bytes());
+ /// ```
+ #[must_use]
+ #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.bytes[..]
+ }
+
+ /// Returns the bytes that were attempted to be converted to a [`CString`].
+ ///
+ /// This method is carefully constructed to avoid allocation. It will
+ /// consume the error, moving out the bytes, so that a copy of the bytes
+ /// does not need to be made.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// // Some invalid bytes in a vector
+ /// let bytes = b"f\0oo".to_vec();
+ ///
+ /// let value = CString::from_vec_with_nul(bytes.clone());
+ ///
+ /// assert_eq!(bytes, value.unwrap_err().into_bytes());
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.bytes
+ }
+}
+
+/// An error indicating invalid UTF-8 when converting a [`CString`] into a [`String`].
+///
+/// `CString` is just a wrapper over a buffer of bytes with a nul terminator;
+/// [`CString::into_string`] performs UTF-8 validation on those bytes and may
+/// return this error.
+///
+/// This `struct` is created by [`CString::into_string()`]. See
+/// its documentation for more.
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(feature = "alloc_c_string", issue = "94079")]
+pub struct IntoStringError {
+ inner: CString,
+ error: Utf8Error,
+}
+
+impl CString {
+ /// Creates a new C-compatible string from a container of bytes.
+ ///
+ /// This function will consume the provided data and use the
+ /// underlying bytes to construct a new string, ensuring that
+ /// there is a trailing 0 byte. This trailing 0 byte will be
+ /// appended by this function; the provided data should *not*
+ /// contain any 0 bytes in it.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (extern-declaration)
+ /// use std::ffi::CString;
+ /// use std::os::raw::c_char;
+ ///
+ /// extern "C" { fn puts(s: *const c_char); }
+ ///
+ /// let to_print = CString::new("Hello!").expect("CString::new failed");
+ /// unsafe {
+ /// puts(to_print.as_ptr());
+ /// }
+ /// ```
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the supplied bytes contain an
+ /// internal 0 byte. The [`NulError`] returned will contain the bytes as well as
+ /// the position of the nul byte.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new<T: Into<Vec<u8>>>(t: T) -> Result<CString, NulError> {
+ trait SpecNewImpl {
+ fn spec_new_impl(self) -> Result<CString, NulError>;
+ }
+
+ impl<T: Into<Vec<u8>>> SpecNewImpl for T {
+ default fn spec_new_impl(self) -> Result<CString, NulError> {
+ let bytes: Vec<u8> = self.into();
+ match memchr::memchr(0, &bytes) {
+ Some(i) => Err(NulError(i, bytes)),
+ None => Ok(unsafe { CString::_from_vec_unchecked(bytes) }),
+ }
+ }
+ }
+
+ // Specialization for avoiding reallocation
+ #[inline(always)] // Without that it is not inlined into specializations
+ fn spec_new_impl_bytes(bytes: &[u8]) -> Result<CString, NulError> {
+ // We cannot have a slice so large that we would overflow here,
+ // but using `checked_add` allows LLVM to assume that the capacity never overflows
+ // and to generate code that is half as long.
+ // `saturating_add` doesn't help for some reason.
+ let capacity = bytes.len().checked_add(1).unwrap();
+
+ // Allocate before validation to avoid duplication of allocation code.
+ // We still need to allocate and copy memory even if we get an error.
+ let mut buffer = Vec::with_capacity(capacity);
+ buffer.extend(bytes);
+
+ // Check memory of self instead of new buffer.
+ // This allows better optimizations if lto enabled.
+ match memchr::memchr(0, bytes) {
+ Some(i) => Err(NulError(i, buffer)),
+ None => Ok(unsafe { CString::_from_vec_unchecked(buffer) }),
+ }
+ }
+
+ impl SpecNewImpl for &'_ [u8] {
+ fn spec_new_impl(self) -> Result<CString, NulError> {
+ spec_new_impl_bytes(self)
+ }
+ }
+
+ impl SpecNewImpl for &'_ str {
+ fn spec_new_impl(self) -> Result<CString, NulError> {
+ spec_new_impl_bytes(self.as_bytes())
+ }
+ }
+
+ impl SpecNewImpl for &'_ mut [u8] {
+ fn spec_new_impl(self) -> Result<CString, NulError> {
+ spec_new_impl_bytes(self)
+ }
+ }
+
+ t.spec_new_impl()
+ }
+
+ /// Creates a C-compatible string by consuming a byte vector,
+ /// without checking for interior 0 bytes.
+ ///
+ /// A trailing 0 byte will be appended by this function.
+ ///
+ /// This method is equivalent to [`CString::new`] except that no runtime
+ /// assertion is made that `v` contains no 0 bytes, and it requires an
+ /// actual byte vector, not anything that can be converted to one with Into.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let raw = b"foo".to_vec();
+ /// unsafe {
+ /// let c_string = CString::from_vec_unchecked(raw);
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_vec_unchecked(v: Vec<u8>) -> Self {
+ debug_assert!(memchr::memchr(0, &v).is_none());
+ unsafe { Self::_from_vec_unchecked(v) }
+ }
+
+ unsafe fn _from_vec_unchecked(mut v: Vec<u8>) -> Self {
+ v.reserve_exact(1);
+ v.push(0);
+ Self { inner: v.into_boxed_slice() }
+ }
+
+ /// Retakes ownership of a `CString` that was transferred to C via
+ /// [`CString::into_raw`].
+ ///
+ /// Additionally, the length of the string will be recalculated from the pointer.
+ ///
+ /// # Safety
+ ///
+ /// This should only ever be called with a pointer that was earlier
+ /// obtained by calling [`CString::into_raw`]. Other usage (e.g., trying to take
+ /// ownership of a string that was allocated by foreign code) is likely to lead
+ /// to undefined behavior or allocator corruption.
+ ///
+ /// It should be noted that the length isn't just "recomputed," but that
+ /// the recomputed length must match the original length from the
+ /// [`CString::into_raw`] call. This means the [`CString::into_raw`]/`from_raw`
+ /// methods should not be used when passing the string to C functions that can
+ /// modify the string's length.
+ ///
+ /// > **Note:** If you need to borrow a string that was allocated by
+ /// > foreign code, use [`CStr`]. If you need to take ownership of
+ /// > a string that was allocated by foreign code, you will need to
+ /// > make your own provisions for freeing it appropriately, likely
+ /// > with the foreign code's API to do that.
+ ///
+ /// # Examples
+ ///
+ /// Create a `CString`, pass ownership to an `extern` function (via a raw pointer), then retake
+ /// ownership with `from_raw`:
+ ///
+ /// ```ignore (extern-declaration)
+ /// use std::ffi::CString;
+ /// use std::os::raw::c_char;
+ ///
+ /// extern "C" {
+ /// fn some_extern_function(s: *mut c_char);
+ /// }
+ ///
+ /// let c_string = CString::new("Hello!").expect("CString::new failed");
+ /// let raw = c_string.into_raw();
+ /// unsafe {
+ /// some_extern_function(raw);
+ /// let c_string = CString::from_raw(raw);
+ /// }
+ /// ```
+ #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `CString`"]
+ #[stable(feature = "cstr_memory", since = "1.4.0")]
+ pub unsafe fn from_raw(ptr: *mut c_char) -> CString {
+ // SAFETY: This is called with a pointer that was obtained from a call
+ // to `CString::into_raw` and the length has not been modified. As such,
+ // we know there is a NUL byte (and only one) at the end and that the
+ // information about the size of the allocation is correct on Rust's
+ // side.
+ unsafe {
+ extern "C" {
+ /// Provided by libc or compiler_builtins.
+ fn strlen(s: *const c_char) -> usize;
+ }
+ let len = strlen(ptr) + 1; // Including the NUL byte
+ let slice = slice::from_raw_parts_mut(ptr, len as usize);
+ CString { inner: Box::from_raw(slice as *mut [c_char] as *mut [u8]) }
+ }
+ }
+
+ /// Consumes the `CString` and transfers ownership of the string to a C caller.
+ ///
+ /// The pointer which this function returns must be returned to Rust and reconstituted using
+ /// [`CString::from_raw`] to be properly deallocated. Specifically, one
+ /// should *not* use the standard C `free()` function to deallocate
+ /// this string.
+ ///
+ /// Failure to call [`CString::from_raw`] will lead to a memory leak.
+ ///
+ /// The C side must **not** modify the length of the string (by writing a
+ /// `null` somewhere inside the string or removing the final one) before
+ /// it makes it back into Rust using [`CString::from_raw`]. See the safety section
+ /// in [`CString::from_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ ///
+ /// let ptr = c_string.into_raw();
+ ///
+ /// unsafe {
+ /// assert_eq!(b'f', *ptr as u8);
+ /// assert_eq!(b'o', *ptr.offset(1) as u8);
+ /// assert_eq!(b'o', *ptr.offset(2) as u8);
+ /// assert_eq!(b'\0', *ptr.offset(3) as u8);
+ ///
+ /// // retake pointer to free memory
+ /// let _ = CString::from_raw(ptr);
+ /// }
+ /// ```
+ #[inline]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstr_memory", since = "1.4.0")]
+ pub fn into_raw(self) -> *mut c_char {
+ Box::into_raw(self.into_inner()) as *mut c_char
+ }
+
+ /// Converts the `CString` into a [`String`] if it contains valid UTF-8 data.
+ ///
+ /// On failure, ownership of the original `CString` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let valid_utf8 = vec![b'f', b'o', b'o'];
+ /// let cstring = CString::new(valid_utf8).expect("CString::new failed");
+ /// assert_eq!(cstring.into_string().expect("into_string() call failed"), "foo");
+ ///
+ /// let invalid_utf8 = vec![b'f', 0xff, b'o', b'o'];
+ /// let cstring = CString::new(invalid_utf8).expect("CString::new failed");
+ /// let err = cstring.into_string().err().expect("into_string().err() failed");
+ /// assert_eq!(err.utf8_error().valid_up_to(), 1);
+ /// ```
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn into_string(self) -> Result<String, IntoStringError> {
+ String::from_utf8(self.into_bytes()).map_err(|e| IntoStringError {
+ error: e.utf8_error(),
+ inner: unsafe { Self::_from_vec_unchecked(e.into_bytes()) },
+ })
+ }
+
+ /// Consumes the `CString` and returns the underlying byte buffer.
+ ///
+ /// The returned buffer does **not** contain the trailing nul
+ /// terminator, and it is guaranteed to not have any interior nul
+ /// bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ /// let bytes = c_string.into_bytes();
+ /// assert_eq!(bytes, vec![b'f', b'o', b'o']);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ let mut vec = into_vec(self.into_inner());
+ let _nul = vec.pop();
+ debug_assert_eq!(_nul, Some(0u8));
+ vec
+ }
+
+ /// Equivalent to [`CString::into_bytes()`] except that the
+ /// returned vector includes the trailing nul terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ /// let bytes = c_string.into_bytes_with_nul();
+ /// assert_eq!(bytes, vec![b'f', b'o', b'o', b'\0']);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn into_bytes_with_nul(self) -> Vec<u8> {
+ into_vec(self.into_inner())
+ }
+
+ /// Returns the contents of this `CString` as a slice of bytes.
+ ///
+ /// The returned slice does **not** contain the trailing nul
+ /// terminator, and it is guaranteed to not have any interior nul
+ /// bytes. If you need the nul terminator, use
+ /// [`CString::as_bytes_with_nul`] instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ /// let bytes = c_string.as_bytes();
+ /// assert_eq!(bytes, &[b'f', b'o', b'o']);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ // SAFETY: CString has a length at least 1
+ unsafe { self.inner.get_unchecked(..self.inner.len() - 1) }
+ }
+
+ /// Equivalent to [`CString::as_bytes()`] except that the
+ /// returned slice includes the trailing nul terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ /// let bytes = c_string.as_bytes_with_nul();
+ /// assert_eq!(bytes, &[b'f', b'o', b'o', b'\0']);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_bytes_with_nul(&self) -> &[u8] {
+ &self.inner
+ }
+
+ /// Extracts a [`CStr`] slice containing the entire string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{CString, CStr};
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).expect("CString::new failed");
+ /// let cstr = c_string.as_c_str();
+ /// assert_eq!(cstr,
+ /// CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed"));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "as_c_str", since = "1.20.0")]
+ pub fn as_c_str(&self) -> &CStr {
+ &*self
+ }
+
+ /// Converts this `CString` into a boxed [`CStr`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{CString, CStr};
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).expect("CString::new failed");
+ /// let boxed = c_string.into_boxed_c_str();
+ /// assert_eq!(&*boxed,
+ /// CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed"));
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "into_boxed_c_str", since = "1.20.0")]
+ pub fn into_boxed_c_str(self) -> Box<CStr> {
+ unsafe { Box::from_raw(Box::into_raw(self.into_inner()) as *mut CStr) }
+ }
+
+ /// Bypass "move out of struct which implements [`Drop`] trait" restriction.
+ #[inline]
+ fn into_inner(self) -> Box<[u8]> {
+ // Rationale: `mem::forget(self)` invalidates the previous call to `ptr::read(&self.inner)`
+ // so we use `ManuallyDrop` to ensure `self` is not dropped.
+ // Then we can return the box directly without invalidating it.
+ // See https://github.com/rust-lang/rust/issues/62553.
+ let this = mem::ManuallyDrop::new(self);
+ unsafe { ptr::read(&this.inner) }
+ }
+
+ /// Converts a <code>[Vec]<[u8]></code> to a [`CString`] without checking the
+ /// invariants on the given [`Vec`].
+ ///
+ /// # Safety
+ ///
+ /// The given [`Vec`] **must** have one nul byte as its last element.
+ /// This means it cannot be empty nor have any other nul byte anywhere else.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ /// assert_eq!(
+ /// unsafe { CString::from_vec_with_nul_unchecked(b"abc\0".to_vec()) },
+ /// unsafe { CString::from_vec_unchecked(b"abc".to_vec()) }
+ /// );
+ /// ```
+ #[must_use]
+ #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+ pub unsafe fn from_vec_with_nul_unchecked(v: Vec<u8>) -> Self {
+ debug_assert!(memchr::memchr(0, &v).unwrap() + 1 == v.len());
+ unsafe { Self::_from_vec_with_nul_unchecked(v) }
+ }
+
+ unsafe fn _from_vec_with_nul_unchecked(v: Vec<u8>) -> Self {
+ Self { inner: v.into_boxed_slice() }
+ }
+
+ /// Attempts to convert a <code>[Vec]<[u8]></code> to a [`CString`].
+ ///
+ /// Runtime checks are present to ensure there is only one nul byte in the
+ /// [`Vec`], its last element.
+ ///
+ /// # Errors
+ ///
+ /// If a nul byte is present and not the last element, or if no nul byte
+ /// is present at all, an error will be returned.
+ ///
+ /// # Examples
+ ///
+ /// A successful conversion will produce the same result as [`CString::new`]
+ /// when called without the ending nul byte.
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ /// assert_eq!(
+ /// CString::from_vec_with_nul(b"abc\0".to_vec())
+ /// .expect("CString::from_vec_with_nul failed"),
+ /// CString::new(b"abc".to_vec()).expect("CString::new failed")
+ /// );
+ /// ```
+ ///
+ /// An incorrectly formatted [`Vec`] will produce an error.
+ ///
+ /// ```
+ /// use std::ffi::{CString, FromVecWithNulError};
+ /// // Interior nul byte
+ /// let _: FromVecWithNulError = CString::from_vec_with_nul(b"a\0bc".to_vec()).unwrap_err();
+ /// // No nul byte
+ /// let _: FromVecWithNulError = CString::from_vec_with_nul(b"abc".to_vec()).unwrap_err();
+ /// ```
+ #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+ pub fn from_vec_with_nul(v: Vec<u8>) -> Result<Self, FromVecWithNulError> {
+ let nul_pos = memchr::memchr(0, &v);
+ match nul_pos {
+ Some(nul_pos) if nul_pos + 1 == v.len() => {
+ // SAFETY: We know there is only one nul byte, at the end
+ // of the vec.
+ Ok(unsafe { Self::_from_vec_with_nul_unchecked(v) })
+ }
+ Some(nul_pos) => Err(FromVecWithNulError {
+ error_kind: FromBytesWithNulErrorKind::InteriorNul(nul_pos),
+ bytes: v,
+ }),
+ None => Err(FromVecWithNulError {
+ error_kind: FromBytesWithNulErrorKind::NotNulTerminated,
+ bytes: v,
+ }),
+ }
+ }
+}
+
+// Turns this `CString` into an empty string to prevent
+// memory-unsafe code from working by accident. Inline
+// to prevent LLVM from optimizing it away in debug builds.
+#[stable(feature = "cstring_drop", since = "1.13.0")]
+impl Drop for CString {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ *self.inner.get_unchecked_mut(0) = 0;
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Deref for CString {
+ type Target = CStr;
+
+ #[inline]
+ fn deref(&self) -> &CStr {
+ unsafe { CStr::from_bytes_with_nul_unchecked(self.as_bytes_with_nul()) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for CString {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "cstring_into", since = "1.7.0")]
+impl From<CString> for Vec<u8> {
+ /// Converts a [`CString`] into a <code>[Vec]<[u8]></code>.
+ ///
+ /// The conversion consumes the [`CString`], and removes the terminating NUL byte.
+ #[inline]
+ fn from(s: CString) -> Vec<u8> {
+ s.into_bytes()
+ }
+}
+
+#[stable(feature = "cstr_default", since = "1.10.0")]
+impl Default for CString {
+ /// Creates an empty `CString`.
+ fn default() -> CString {
+ let a: &CStr = Default::default();
+ a.to_owned()
+ }
+}
+
+#[stable(feature = "cstr_borrow", since = "1.3.0")]
+impl Borrow<CStr> for CString {
+ #[inline]
+ fn borrow(&self) -> &CStr {
+ self
+ }
+}
+
+#[stable(feature = "cstring_from_cow_cstr", since = "1.28.0")]
+impl<'a> From<Cow<'a, CStr>> for CString {
+ /// Converts a `Cow<'a, CStr>` into a `CString`, by copying the contents if they are
+ /// borrowed.
+ #[inline]
+ fn from(s: Cow<'a, CStr>) -> Self {
+ s.into_owned()
+ }
+}
+
+#[cfg(not(test))]
+#[stable(feature = "box_from_c_str", since = "1.17.0")]
+impl From<&CStr> for Box<CStr> {
+ /// Converts a `&CStr` into a `Box<CStr>`,
+ /// by copying the contents into a newly allocated [`Box`].
+ fn from(s: &CStr) -> Box<CStr> {
+ let boxed: Box<[u8]> = Box::from(s.to_bytes_with_nul());
+ unsafe { Box::from_raw(Box::into_raw(boxed) as *mut CStr) }
+ }
+}
+
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl From<Cow<'_, CStr>> for Box<CStr> {
+ /// Converts a `Cow<'a, CStr>` into a `Box<CStr>`,
+ /// by copying the contents if they are borrowed.
+ #[inline]
+ fn from(cow: Cow<'_, CStr>) -> Box<CStr> {
+ match cow {
+ Cow::Borrowed(s) => Box::from(s),
+ Cow::Owned(s) => Box::from(s),
+ }
+ }
+}
+
+#[stable(feature = "c_string_from_box", since = "1.18.0")]
+impl From<Box<CStr>> for CString {
+ /// Converts a <code>[Box]<[CStr]></code> into a [`CString`] without copying or allocating.
+ #[inline]
+ fn from(s: Box<CStr>) -> CString {
+ let raw = Box::into_raw(s) as *mut [u8];
+ CString { inner: unsafe { Box::from_raw(raw) } }
+ }
+}
+
+#[stable(feature = "cstring_from_vec_of_nonzerou8", since = "1.43.0")]
+impl From<Vec<NonZeroU8>> for CString {
+ /// Converts a <code>[Vec]<[NonZeroU8]></code> into a [`CString`] without
+ /// copying or checking for inner null bytes.
+ #[inline]
+ fn from(v: Vec<NonZeroU8>) -> CString {
+ unsafe {
+ // Transmute `Vec<NonZeroU8>` to `Vec<u8>`.
+ let v: Vec<u8> = {
+ // SAFETY:
+ // - transmuting between `NonZeroU8` and `u8` is sound;
+ // - `alloc::Layout<NonZeroU8> == alloc::Layout<u8>`.
+ let (ptr, len, cap): (*mut NonZeroU8, _, _) = Vec::into_raw_parts(v);
+ Vec::from_raw_parts(ptr.cast::<u8>(), len, cap)
+ };
+ // SAFETY: `v` cannot contain null bytes, given the type-level
+ // invariant of `NonZeroU8`.
+ Self::_from_vec_unchecked(v)
+ }
+ }
+}
+
+#[cfg(not(test))]
+#[stable(feature = "more_box_slice_clone", since = "1.29.0")]
+impl Clone for Box<CStr> {
+ #[inline]
+ fn clone(&self) -> Self {
+ (**self).into()
+ }
+}
+
+#[stable(feature = "box_from_c_string", since = "1.20.0")]
+impl From<CString> for Box<CStr> {
+ /// Converts a [`CString`] into a <code>[Box]<[CStr]></code> without copying or allocating.
+ #[inline]
+ fn from(s: CString) -> Box<CStr> {
+ s.into_boxed_c_str()
+ }
+}
+
+#[stable(feature = "cow_from_cstr", since = "1.28.0")]
+impl<'a> From<CString> for Cow<'a, CStr> {
+ /// Converts a [`CString`] into an owned [`Cow`] without copying or allocating.
+ #[inline]
+ fn from(s: CString) -> Cow<'a, CStr> {
+ Cow::Owned(s)
+ }
+}
+
+#[stable(feature = "cow_from_cstr", since = "1.28.0")]
+impl<'a> From<&'a CStr> for Cow<'a, CStr> {
+ /// Converts a [`CStr`] into a borrowed [`Cow`] without copying or allocating.
+ #[inline]
+ fn from(s: &'a CStr) -> Cow<'a, CStr> {
+ Cow::Borrowed(s)
+ }
+}
+
+#[stable(feature = "cow_from_cstr", since = "1.28.0")]
+impl<'a> From<&'a CString> for Cow<'a, CStr> {
+ /// Converts a `&`[`CString`] into a borrowed [`Cow`] without copying or allocating.
+ #[inline]
+ fn from(s: &'a CString) -> Cow<'a, CStr> {
+ Cow::Borrowed(s.as_c_str())
+ }
+}
+
+#[cfg(target_has_atomic = "ptr")]
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<CString> for Arc<CStr> {
+ /// Converts a [`CString`] into an <code>[Arc]<[CStr]></code> by moving the [`CString`]
+ /// data into a new [`Arc`] buffer.
+ #[inline]
+ fn from(s: CString) -> Arc<CStr> {
+ let arc: Arc<[u8]> = Arc::from(s.into_inner());
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const CStr) }
+ }
+}
+
+#[cfg(target_has_atomic = "ptr")]
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<&CStr> for Arc<CStr> {
+ /// Converts a `&CStr` into an `Arc<CStr>`,
+ /// by copying the contents into a newly allocated [`Arc`].
+ #[inline]
+ fn from(s: &CStr) -> Arc<CStr> {
+ let arc: Arc<[u8]> = Arc::from(s.to_bytes_with_nul());
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const CStr) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<CString> for Rc<CStr> {
+ /// Converts a [`CString`] into an <code>[Rc]<[CStr]></code> by moving the [`CString`]
+ /// data into a new [`Rc`] buffer.
+ #[inline]
+ fn from(s: CString) -> Rc<CStr> {
+ let rc: Rc<[u8]> = Rc::from(s.into_inner());
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const CStr) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<&CStr> for Rc<CStr> {
+ /// Converts a `&CStr` into a `Rc<CStr>`,
+ /// by copying the contents into a newly allocated [`Rc`].
+ #[inline]
+ fn from(s: &CStr) -> Rc<CStr> {
+ let rc: Rc<[u8]> = Rc::from(s.to_bytes_with_nul());
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const CStr) }
+ }
+}
+
+#[cfg(not(test))]
+#[stable(feature = "default_box_extra", since = "1.17.0")]
+impl Default for Box<CStr> {
+ fn default() -> Box<CStr> {
+ let boxed: Box<[u8]> = Box::from([0]);
+ unsafe { Box::from_raw(Box::into_raw(boxed) as *mut CStr) }
+ }
+}
+
+impl NulError {
+ /// Returns the position of the nul byte in the slice that caused
+ /// [`CString::new`] to fail.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let nul_error = CString::new("foo\0bar").unwrap_err();
+ /// assert_eq!(nul_error.nul_position(), 3);
+ ///
+ /// let nul_error = CString::new("foo bar\0").unwrap_err();
+ /// assert_eq!(nul_error.nul_position(), 7);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn nul_position(&self) -> usize {
+ self.0
+ }
+
+ /// Consumes this error, returning the underlying vector of bytes which
+ /// generated the error in the first place.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let nul_error = CString::new("foo\0bar").unwrap_err();
+ /// assert_eq!(nul_error.into_vec(), b"foo\0bar");
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_vec(self) -> Vec<u8> {
+ self.1
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for NulError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "nul byte found in provided data at position: {}", self.0)
+ }
+}
+
+#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+impl fmt::Display for FromVecWithNulError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.error_kind {
+ FromBytesWithNulErrorKind::InteriorNul(pos) => {
+ write!(f, "data provided contains an interior nul byte at pos {pos}")
+ }
+ FromBytesWithNulErrorKind::NotNulTerminated => {
+ write!(f, "data provided is not nul terminated")
+ }
+ }
+ }
+}
+
+impl IntoStringError {
+ /// Consumes this error, returning original [`CString`] which generated the
+ /// error.
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn into_cstring(self) -> CString {
+ self.inner
+ }
+
+ /// Access the underlying UTF-8 error that was the cause of this error.
+ #[must_use]
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn utf8_error(&self) -> Utf8Error {
+ self.error
+ }
+
+ #[doc(hidden)]
+ #[unstable(feature = "cstr_internals", issue = "none")]
+ pub fn __source(&self) -> &Utf8Error {
+ &self.error
+ }
+}
+
+impl IntoStringError {
+ fn description(&self) -> &str {
+ "C string contained non-utf8 bytes"
+ }
+}
+
+#[stable(feature = "cstring_into", since = "1.7.0")]
+impl fmt::Display for IntoStringError {
+ #[allow(deprecated, deprecated_in_future)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.description().fmt(f)
+ }
+}
+
+#[stable(feature = "cstr_borrow", since = "1.3.0")]
+impl ToOwned for CStr {
+ type Owned = CString;
+
+ fn to_owned(&self) -> CString {
+ CString { inner: self.to_bytes_with_nul().into() }
+ }
+
+ fn clone_into(&self, target: &mut CString) {
+ let mut b = into_vec(mem::take(&mut target.inner));
+ self.to_bytes_with_nul().clone_into(&mut b);
+ target.inner = b.into_boxed_slice();
+ }
+}
+
+#[stable(feature = "cstring_asref", since = "1.7.0")]
+impl From<&CStr> for CString {
+ fn from(s: &CStr) -> CString {
+ s.to_owned()
+ }
+}
+
+#[stable(feature = "cstring_asref", since = "1.7.0")]
+impl ops::Index<ops::RangeFull> for CString {
+ type Output = CStr;
+
+ #[inline]
+ fn index(&self, _index: ops::RangeFull) -> &CStr {
+ self
+ }
+}
+
+#[stable(feature = "cstring_asref", since = "1.7.0")]
+impl AsRef<CStr> for CString {
+ #[inline]
+ fn as_ref(&self) -> &CStr {
+ self
+ }
+}
+
+#[cfg(bootstrap)]
+#[doc(hidden)]
+#[unstable(feature = "cstr_internals", issue = "none")]
+pub trait CStrExt {
+ /// Converts a `CStr` into a <code>[Cow]<[str]></code>.
+ ///
+ /// If the contents of the `CStr` are valid UTF-8 data, this
+ /// function will return a <code>[Cow]::[Borrowed]\(&[str])</code>
+ /// with the corresponding <code>&[str]</code> slice. Otherwise, it will
+ /// replace any invalid UTF-8 sequences with
+ /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD] and return a
+ /// <code>[Cow]::[Owned]\(&[str])</code> with the result.
+ ///
+ /// [str]: prim@str "str"
+ /// [Borrowed]: Cow::Borrowed
+ /// [Owned]: Cow::Owned
+ /// [U+FFFD]: crate::char::REPLACEMENT_CHARACTER "std::char::REPLACEMENT_CHARACTER"
+ ///
+ /// # Examples
+ ///
+ /// Calling `to_string_lossy` on a `CStr` containing valid UTF-8:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"Hello World\0")
+ /// .expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(cstr.to_string_lossy(), Cow::Borrowed("Hello World"));
+ /// ```
+ ///
+ /// Calling `to_string_lossy` on a `CStr` containing invalid UTF-8:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"Hello \xF0\x90\x80World\0")
+ /// .expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(
+ /// cstr.to_string_lossy(),
+ /// Cow::Owned(String::from("Hello �World")) as Cow<'_, str>
+ /// );
+ /// ```
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "cstr_to_str", since = "1.4.0")]
+ fn to_string_lossy(&self) -> Cow<'_, str>;
+
+ /// Converts a <code>[Box]<[CStr]></code> into a [`CString`] without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).expect("CString::new failed");
+ /// let boxed = c_string.into_boxed_c_str();
+ /// assert_eq!(boxed.into_c_string(), CString::new("foo").expect("CString::new failed"));
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "into_boxed_c_str", since = "1.20.0")]
+ fn into_c_string(self: Box<Self>) -> CString;
+}
+
+#[cfg(bootstrap)]
+#[unstable(feature = "cstr_internals", issue = "none")]
+impl CStrExt for CStr {
+ fn to_string_lossy(&self) -> Cow<'_, str> {
+ String::from_utf8_lossy(self.to_bytes())
+ }
+
+ fn into_c_string(self: Box<Self>) -> CString {
+ CString::from(self)
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(test))]
+impl CStr {
+ /// Converts a `CStr` into a <code>[Cow]<[str]></code>.
+ ///
+ /// If the contents of the `CStr` are valid UTF-8 data, this
+ /// function will return a <code>[Cow]::[Borrowed]\(&[str])</code>
+ /// with the corresponding <code>&[str]</code> slice. Otherwise, it will
+ /// replace any invalid UTF-8 sequences with
+ /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD] and return a
+ /// <code>[Cow]::[Owned]\(&[str])</code> with the result.
+ ///
+ /// [str]: prim@str "str"
+ /// [Borrowed]: Cow::Borrowed
+ /// [Owned]: Cow::Owned
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER "std::char::REPLACEMENT_CHARACTER"
+ ///
+ /// # Examples
+ ///
+ /// Calling `to_string_lossy` on a `CStr` containing valid UTF-8:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"Hello World\0")
+ /// .expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(cstr.to_string_lossy(), Cow::Borrowed("Hello World"));
+ /// ```
+ ///
+ /// Calling `to_string_lossy` on a `CStr` containing invalid UTF-8:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"Hello \xF0\x90\x80World\0")
+ /// .expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(
+ /// cstr.to_string_lossy(),
+ /// Cow::Owned(String::from("Hello �World")) as Cow<'_, str>
+ /// );
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "cstr_to_str", since = "1.4.0")]
+ pub fn to_string_lossy(&self) -> Cow<'_, str> {
+ String::from_utf8_lossy(self.to_bytes())
+ }
+
+ /// Converts a <code>[Box]<[CStr]></code> into a [`CString`] without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).expect("CString::new failed");
+ /// let boxed = c_string.into_boxed_c_str();
+ /// assert_eq!(boxed.into_c_string(), CString::new("foo").expect("CString::new failed"));
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "into_boxed_c_str", since = "1.20.0")]
+ pub fn into_c_string(self: Box<Self>) -> CString {
+ CString::from(self)
+ }
+}
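
Most of `CString`'s surface area revolves around the two invariants spelled out on the `inner` field (exactly one nul byte, and it is the last byte). A compact userspace sketch of the `into_raw`/`from_raw` round trip those invariants make safe, illustrative only:

    use std::ffi::CString;

    fn main() {
        let ptr = CString::new("hello").expect("no interior nul").into_raw();

        // A C callee may read the string here, but must not change its length.

        // Retake ownership so Rust frees the allocation; `from_raw` recomputes
        // the length with `strlen`, which is why the length must be unchanged.
        let restored = unsafe { CString::from_raw(ptr) };
        assert_eq!(restored.as_bytes(), b"hello");
    }
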
diff --git a/rust/alloc/ffi/mod.rs b/rust/alloc/ffi/mod.rs
new file mode 100644
index 000000000000..56d429785339
--- /dev/null
+++ b/rust/alloc/ffi/mod.rs
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Utilities related to FFI bindings.
+//!
+//! This module provides utilities to handle data across non-Rust
+//! interfaces, like other programming languages and the underlying
+//! operating system. It is mainly of use for FFI (Foreign Function
+//! Interface) bindings and code that needs to exchange C-like strings
+//! with other languages.
+//!
+//! # Overview
+//!
+//! Rust represents owned strings with the [`String`] type, and
+//! borrowed slices of strings with the [`str`] primitive. Both are
+//! always in UTF-8 encoding, and may contain nul bytes in the middle,
+//! i.e., if you look at the bytes that make up the string, there may
+//! be a `\0` among them. Both `String` and `str` store their length
+//! explicitly; there are no nul terminators at the end of strings
+//! like in C.
+//!
+//! C strings are different from Rust strings:
+//!
+//! * **Encodings** - Rust strings are UTF-8, but C strings may use
+//! other encodings. If you are using a string from C, you should
+//! check its encoding explicitly, rather than just assuming that it
+//! is UTF-8 like you can do in Rust.
+//!
+//! * **Character size** - C strings may use `char` or `wchar_t`-sized
+//! characters; please **note** that C's `char` is different from Rust's.
+//! The C standard leaves the actual sizes of those types open to
+//! interpretation, but defines different APIs for strings made up of
+//! each character type. Rust strings are always UTF-8, so different
+//! Unicode characters will be encoded in a variable number of bytes
+//! each. The Rust type [`char`] represents a '[Unicode scalar
+//! value]', which is similar to, but not the same as, a '[Unicode
+//! code point]'.
+//!
+//! * **Nul terminators and implicit string lengths** - Often, C
+//! strings are nul-terminated, i.e., they have a `\0` character at the
+//! end. The length of a string buffer is not stored, but has to be
+//! calculated; to compute the length of a string, C code must
+//! manually call a function like `strlen()` for `char`-based strings,
+//! or `wcslen()` for `wchar_t`-based ones. Those functions return
+//! the number of characters in the string excluding the nul
+//! terminator, so the buffer length is really `len+1` characters.
+//! Rust strings don't have a nul terminator; their length is always
+//! stored and does not need to be calculated. While in Rust
+//! accessing a string's length is an *O*(1) operation (because the
+//! length is stored), in C it is an *O*(*n*) operation because the
+//! length needs to be computed by scanning the string for the nul
+//! terminator.
+//!
+//! * **Internal nul characters** - When C strings have a nul
+//! terminator character, this usually means that they cannot have nul
+//! characters in the middle — a nul character would essentially
+//! truncate the string. Rust strings *can* have nul characters in
+//! the middle, because nul does not have to mark the end of the
+//! string in Rust.
+//!
+//! # Representations of non-Rust strings
+//!
+//! [`CString`] and [`CStr`] are useful when you need to transfer
+//! UTF-8 strings to and from languages with a C ABI, like Python.
+//!
+//! * **From Rust to C:** [`CString`] represents an owned, C-friendly
+//! string: it is nul-terminated, and has no internal nul characters.
+//! Rust code can create a [`CString`] out of a normal string (provided
+//! that the string doesn't have nul characters in the middle), and
+//! then use a variety of methods to obtain a raw <code>\*mut [u8]</code> that can
+//! then be passed as an argument to functions which use the C
+//! conventions for strings.
+//!
+//! * **From C to Rust:** [`CStr`] represents a borrowed C string; it
+//! is what you would use to wrap a raw <code>\*const [u8]</code> that you got from
+//! a C function. A [`CStr`] is guaranteed to be a nul-terminated array
+//! of bytes. Once you have a [`CStr`], you can convert it to a Rust
+//! <code>&[str]</code> if it's valid UTF-8, or lossily convert it by adding
+//! replacement characters.
+//!
+//! [`String`]: crate::string::String
+//! [`CStr`]: core::ffi::CStr
+
+#![unstable(feature = "alloc_ffi", issue = "94079")]
+
+#[cfg(bootstrap)]
+#[unstable(feature = "cstr_internals", issue = "none")]
+pub use self::c_str::CStrExt;
+#[unstable(feature = "alloc_c_string", issue = "94079")]
+pub use self::c_str::FromVecWithNulError;
+#[unstable(feature = "alloc_c_string", issue = "94079")]
+pub use self::c_str::{CString, IntoStringError, NulError};
+
+mod c_str;
diff --git a/rust/alloc/fmt.rs b/rust/alloc/fmt.rs
new file mode 100644
index 000000000000..b9c4d2926d23
--- /dev/null
+++ b/rust/alloc/fmt.rs
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Utilities for formatting and printing `String`s.
+//!
+//! This module contains the runtime support for the [`format!`] syntax extension.
+//! This macro is implemented in the compiler to emit calls to this module in
+//! order to format arguments at runtime into strings.
+//!
+//! # Usage
+//!
+//! The [`format!`] macro is intended to be familiar to those coming from C's
+//! `printf`/`fprintf` functions or Python's `str.format` function.
+//!
+//! Some examples of the [`format!`] extension are:
+//!
+//! ```
+//! format!("Hello"); // => "Hello"
+//! format!("Hello, {}!", "world"); // => "Hello, world!"
+//! format!("The number is {}", 1); // => "The number is 1"
+//! format!("{:?}", (3, 4)); // => "(3, 4)"
+//! format!("{value}", value=4); // => "4"
+//! let people = "Rustaceans";
+//! format!("Hello {people}!"); // => "Hello Rustaceans!"
+//! format!("{} {}", 1, 2); // => "1 2"
+//! format!("{:04}", 42); // => "0042" with leading zeros
+//! format!("{:#?}", (100, 200)); // => "(
+//! // 100,
+//! // 200,
+//! // )"
+//! ```
+//!
+//! From these, you can see that the first argument is a format string. It is
+//! required by the compiler for this to be a string literal; it cannot be a
+//! variable passed in (in order to perform validity checking). The compiler
+//! will then parse the format string and determine if the list of arguments
+//! provided is suitable to pass to this format string.
+//!
+//! To convert a single value to a string, use the [`to_string`] method. This
+//! will use the [`Display`] formatting trait.
+//!
+//! ## Positional parameters
+//!
+//! Each formatting argument is allowed to specify which value argument it's
+//! referencing, and if omitted it is assumed to be "the next argument". For
+//! example, the format string `{} {} {}` would take three parameters, and they
+//! would be formatted in the same order as they're given. The format string
+//! `{2} {1} {0}`, however, would format arguments in reverse order.
+//!
+//! Things can get a little tricky once you start intermingling the two types of
+//! positional specifiers. The "next argument" specifier can be thought of as an
+//! iterator over the arguments. Each time a "next argument" specifier is seen,
+//! the iterator advances. This leads to behavior like this:
+//!
+//! ```
+//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
+//! ```
+//!
+//! The internal iterator over the arguments has not been advanced by the time
+//! the first `{}` is seen, so it prints the first argument. Then upon reaching
+//! the second `{}`, the iterator has advanced forward to the second argument.
+//! Essentially, parameters that explicitly name their argument do not affect
+//! parameters that do not name an argument in terms of positional specifiers.
+//!
+//! A format string is required to use all of its arguments, otherwise it is a
+//! compile-time error. You may refer to the same argument more than once in the
+//! format string.
+//!
+//! ## Named parameters
+//!
+//! Rust itself does not have a Python-like equivalent of named parameters to a
+//! function, but the [`format!`] macro is a syntax extension that allows it to
+//! leverage named parameters. Named parameters are listed at the end of the
+//! argument list and have the syntax:
+//!
+//! ```text
+//! identifier '=' expression
+//! ```
+//!
+//! For example, the following [`format!`] expressions all use named arguments:
+//!
+//! ```
+//! format!("{argument}", argument = "test"); // => "test"
+//! format!("{name} {}", 1, name = 2); // => "2 1"
+//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
+//! ```
+//!
+//! If a named parameter does not appear in the argument list, `format!` will
+//! reference a variable with that name in the current scope.
+//!
+//! ```
+//! let argument = 2 + 2;
+//! format!("{argument}"); // => "4"
+//!
+//! fn make_string(a: u32, b: &str) -> String {
+//! format!("{b} {a}")
+//! }
+//! make_string(927, "label"); // => "label 927"
+//! ```
+//!
+//! It is not valid to put positional parameters (those without names) after
+//! arguments that have names. Like with positional parameters, it is not
+//! valid to provide named parameters that are unused by the format string.
+//!
+//! # Formatting Parameters
+//!
+//! Each argument being formatted can be transformed by a number of formatting
+//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
+//! parameters affect the string representation of what's being formatted.
+//!
+//! ## Width
+//!
+//! ```
+//! // All of these print "Hello x    !"
+//! println!("Hello {:5}!", "x");
+//! println!("Hello {:1$}!", "x", 5);
+//! println!("Hello {1:0$}!", 5, "x");
+//! println!("Hello {:width$}!", "x", width = 5);
+//! let width = 5;
+//! println!("Hello {:width$}!", "x");
+//! ```
+//!
+//! This is a parameter for the "minimum width" that the format should take up.
+//! If the value's string does not fill up this many characters, then the
+//! padding specified by fill/alignment will be used to take up the required
+//! space (see below).
+//!
+//! The value for the width can also be provided as a [`usize`] in the list of
+//! parameters by adding a postfix `$`, indicating that the second argument is
+//! a [`usize`] specifying the width.
+//!
+//! Referring to an argument with the dollar syntax does not affect the "next
+//! argument" counter, so it's usually a good idea to refer to arguments by
+//! position, or use named arguments.
+//!
+//! ## Fill/Alignment
+//!
+//! ```
+//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
+//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
+//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
+//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
+//! ```
+//!
+//! The optional fill character and alignment are normally provided in conjunction with the
+//! [`width`](#width) parameter. They must be defined before `width`, right after the `:`.
+//! This indicates that if the value being formatted is smaller than
+//! `width`, some extra characters will be printed around it.
+//! Filling comes in the following variants for different alignments:
+//!
+//! * `[fill]<` - the argument is left-aligned in `width` columns
+//! * `[fill]^` - the argument is center-aligned in `width` columns
+//! * `[fill]>` - the argument is right-aligned in `width` columns
+//!
+//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
+//! left-aligned. The
+//! default for numeric formatters is also a space character but with right-alignment. If
+//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
+//! `0`.
+//!
+//! Note that alignment might not be implemented by some types. In particular, it
+//! is not generally implemented for the `Debug` trait. A good way to ensure
+//! padding is applied is to format your input, then pad this resulting string
+//! to obtain your output:
+//!
+//! ```
+//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
+//! ```
+//!
+//! ## Sign/`#`/`0`
+//!
+//! ```
+//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
+//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
+//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
+//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
+//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
+//! ```
+//!
+//! These are all flags altering the behavior of the formatter.
+//!
+//! * `+` - This is intended for numeric types and indicates that the sign
+//! should always be printed. Positive signs are never printed by
+//! default, and the negative sign is only printed by default for signed values.
+//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
+//! * `-` - Currently not used
+//! * `#` - This flag indicates that the "alternate" form of printing should
+//! be used. The alternate forms are:
+//! * `#?` - pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
+//! * `#x` - precedes the argument with a `0x`
+//! * `#X` - precedes the argument with a `0x`
+//! * `#b` - precedes the argument with a `0b`
+//! * `#o` - precedes the argument with a `0o`
+//! * `0` - This is used to indicate for integer formats that the padding to `width` should
+//! both be done with a `0` character as well as be sign-aware. A format
+//! like `{:08}` would yield `00000001` for the integer `1`, while the
+//! same format would yield `-0000001` for the integer `-1`. Notice that
+//! the negative version has one fewer zero than the positive version.
+//! Note that padding zeros are always placed after the sign (if any)
+//! and before the digits. When used together with the `#` flag, a similar
+//! rule applies: padding zeros are inserted after the prefix but before
+//! the digits. The prefix is included in the total width.
+//!
+//! ## Precision
+//!
+//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
+//! longer than this width, then it is truncated down to this many characters and that truncated
+//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
+//!
+//! For integral types, this is ignored.
+//!
+//! For floating-point types, this indicates how many digits after the decimal point should be
+//! printed.
+//!
+//! There are three possible ways to specify the desired `precision`:
+//!
+//! 1. An integer `.N`:
+//!
+//! the integer `N` itself is the precision.
+//!
+//! 2. An integer or name followed by dollar sign `.N$`:
+//!
+//! use format *argument* `N` (which must be a `usize`) as the precision.
+//!
+//! 3. An asterisk `.*`:
+//!
+//! `.*` means that this `{...}` is associated with *two* format inputs rather than one:
+//! - If a format string in the fashion of `{:<spec>.*}` is used, then the first input holds
+//! the `usize` precision, and the second holds the value to print.
+//! - If a format string in the fashion of `{<arg>:<spec>.*}` is used, then the `<arg>` part
+//! refers to the value to print, and the `precision` is taken like it was specified with an
+//! omitted positional parameter (`{}` instead of `{<arg>:}`).
+//!
+//! For example, the following calls all print the same thing `Hello x is 0.01000`:
+//!
+//! ```
+//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
+//! println!("Hello {0} is {1:.5}", "x", 0.01);
+//!
+//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
+//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
+//!
+//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
+//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
+//!
+//! // Hello {next arg -> arg 0 ("x")} is {second of next two args -> arg 2 (0.01) with precision
+//! // specified in first of next two args -> arg 1 (5)}
+//! println!("Hello {} is {:.*}", "x", 5, 0.01);
+//!
+//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision
+//! // specified in next arg -> arg 0 (5)}
+//! println!("Hello {1} is {2:.*}", 5, "x", 0.01);
+//!
+//! // Hello {next arg -> arg 0 ("x")} is {arg 2 (0.01) with precision
+//! // specified in next arg -> arg 1 (5)}
+//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
+//!
+//! // Hello {next arg -> arg 0 ("x")} is {arg "number" (0.01) with precision specified
+//! // in arg "prec" (5)}
+//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
+//! ```
+//!
+//! While these:
+//!
+//! ```
+//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
+//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
+//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
+//! ```
+//!
+//! print three significantly different things:
+//!
+//! ```text
+//! Hello, `1234.560` has 3 fractional digits
+//! Hello, `123` has 3 characters
+//! Hello, `     123` has 3 right-aligned characters
+//! ```
+//!
+//! ## Localization
+//!
+//! In some programming languages, the behavior of string formatting functions
+//! depends on the operating system's locale setting. The format functions
+//! provided by Rust's standard library do not have any concept of locale and
+//! will produce the same results on all systems regardless of user
+//! configuration.
+//!
+//! For example, the following code will always print `1.5` even if the system
+//! locale uses a decimal separator other than a dot.
+//!
+//! ```
+//! println!("The value is {}", 1.5);
+//! ```
+//!
+//! # Escaping
+//!
+//! The literal characters `{` and `}` may be included in a string by preceding
+//! them with the same character. For example, the `{` character is escaped with
+//! `{{` and the `}` character is escaped with `}}`.
+//!
+//! ```
+//! assert_eq!(format!("Hello {{}}"), "Hello {}");
+//! assert_eq!(format!("{{ Hello"), "{ Hello");
+//! ```
+//!
+//! # Syntax
+//!
+//! To summarize, here you can find the full grammar of format strings.
+//! The syntax for the formatting language used is drawn from other languages,
+//! so it should not be too alien. Arguments are formatted with Python-like
+//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
+//! `%`. The actual grammar for the formatting syntax is:
+//!
+//! ```text
+//! format_string := text [ maybe_format text ] *
+//! maybe_format := '{' '{' | '}' '}' | format
+//! format := '{' [ argument ] [ ':' format_spec ] [ ws ] * '}'
+//! argument := integer | identifier
+//!
+//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type
+//! fill := character
+//! align := '<' | '^' | '>'
+//! sign := '+' | '-'
+//! width := count
+//! precision := count | '*'
+//! type := '' | '?' | 'x?' | 'X?' | identifier
+//! count := parameter | integer
+//! parameter := argument '$'
+//! ```
+//! In the above grammar,
+//! - `text` must not contain any `'{'` or `'}'` characters,
+//! - `ws` is any character for which [`char::is_whitespace`] returns `true`, has no semantic
+//! meaning and is completely optional,
+//! - `integer` is a decimal integer that may contain leading zeroes and
+//! - `identifier` is an `IDENTIFIER_OR_KEYWORD` (not an `IDENTIFIER`) as defined by the [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html).
+//!
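+//! For example (purely as an illustration of the grammar), in the format
+//! string `"{0:>8.3}"` the `argument` is `0` and the `format_spec` consists
+//! of the align `>`, the width `8`, the precision `3` and an empty type,
+//! which selects [`Display`] formatting:
+//!
+//! ```
+//! assert_eq!(format!("{0:>8.3}", 3.5), "   3.500");
+//! ```
+//!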
+//! # Formatting traits
+//!
+//! When requesting that an argument be formatted with a particular type, you
+//! are actually requesting that an argument ascribes to a particular trait.
+//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
+//! well as [`isize`]). The current mapping of types to traits is:
+//!
+//! * *nothing* ⇒ [`Display`]
+//! * `?` ⇒ [`Debug`]
+//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
+//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
+//! * `o` ⇒ [`Octal`]
+//! * `x` ⇒ [`LowerHex`]
+//! * `X` ⇒ [`UpperHex`]
+//! * `p` ⇒ [`Pointer`]
+//! * `b` ⇒ [`Binary`]
+//! * `e` ⇒ [`LowerExp`]
+//! * `E` ⇒ [`UpperExp`]
+//!
+//! What this means is that any type of argument which implements the
+//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
+//! are provided for these traits for a number of primitive types by the
+//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
+//! then the format trait used is the [`Display`] trait.
+//!
+//! When implementing a format trait for your own type, you will have to
+//! implement a method of the signature:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! # use std::fmt;
+//! # struct Foo; // our custom type
+//! # impl fmt::Display for Foo {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! # write!(f, "testing, testing")
+//! # } }
+//! ```
+//!
+//! Your type will be passed as `self` by-reference, and then the function
+//! should emit output into the Formatter `f` which implements `fmt::Write`. It is up to each
+//! format trait implementation to correctly adhere to the requested formatting parameters.
+//! The values of these parameters can be accessed with methods of the
+//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
+//! provides some helper methods.
+//!
+//! Additionally, the return value of this function is [`fmt::Result`] which is a
+//! type alias of <code>[Result]<(), [std::fmt::Error]></code>. Formatting implementations
+//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
+//! calling [`write!`]). However, they should never return errors spuriously. That
+//! is, a formatting implementation must and may only return an error if the
+//! passed-in [`Formatter`] returns an error. This is because, contrary to what
+//! the function signature might suggest, string formatting is an infallible
+//! operation. This function only returns a result because writing to the
+//! underlying stream might fail and it must provide a way to propagate the fact
+//! that an error has occurred back up the stack.
+//!
+//! An example of implementing the formatting traits would look
+//! like:
+//!
+//! ```
+//! use std::fmt;
+//!
+//! #[derive(Debug)]
+//! struct Vector2D {
+//! x: isize,
+//! y: isize,
+//! }
+//!
+//! impl fmt::Display for Vector2D {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! // The `f` value implements the `Write` trait, which is what the
+//! // write! macro is expecting. Note that this formatting ignores the
+//! // various flags provided to format strings.
+//! write!(f, "({}, {})", self.x, self.y)
+//! }
+//! }
+//!
+//! // Different traits allow different forms of output of a type. The meaning
+//! // of this format is to print the magnitude of a vector.
+//! impl fmt::Binary for Vector2D {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
+//! let magnitude = magnitude.sqrt();
+//!
+//! // Respect the formatting flags by using the helper method
+//! // `pad_integral` on the Formatter object. See the method
+//! // documentation for details, and the function `pad` can be used
+//! // to pad strings.
+//! let decimals = f.precision().unwrap_or(3);
+//! let string = format!("{:.*}", decimals, magnitude);
+//! f.pad_integral(true, "", &string)
+//! }
+//! }
+//!
+//! fn main() {
+//! let myvector = Vector2D { x: 3, y: 4 };
+//!
+//! println!("{myvector}"); // => "(3, 4)"
+//! println!("{myvector:?}"); // => "Vector2D {x: 3, y:4}"
+//! println!("{myvector:10.3b}"); // => " 5.000"
+//! }
+//! ```
+//!
+//! ### `fmt::Display` vs `fmt::Debug`
+//!
+//! These two formatting traits have distinct purposes:
+//!
+//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
+//! represented as a UTF-8 string at all times. It is **not** expected that
+//! all types implement the [`Display`] trait.
+//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
+//! Output will typically represent the internal state as faithfully as possible.
+//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
+//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
+//!
+//! Some examples of the output from both traits:
+//!
+//! ```
+//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
+//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
+//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
+//! ```
+//!
+//! # Related macros
+//!
+//! There are a number of related macros in the [`format!`] family. The ones that
+//! are currently implemented are:
+//!
+//! ```ignore (only-for-syntax-highlight)
+//! format! // described above
+//! write! // first argument is either a &mut io::Write or a &mut fmt::Write, the destination
+//! writeln! // same as write but appends a newline
+//! print! // the format string is printed to the standard output
+//! println! // same as print but appends a newline
+//! eprint! // the format string is printed to the standard error
+//! eprintln! // same as eprint but appends a newline
+//! format_args! // described below.
+//! ```
+//!
+//! ### `write!`
+//!
+//! [`write!`] and [`writeln!`] are two macros which are used to emit the format string
+//! to a specified stream. This is used to prevent intermediate allocations of
+//! format strings and instead directly write the output. Under the hood, this
+//! function is actually invoking the [`write_fmt`] function defined on the
+//! [`std::io::Write`] and the [`std::fmt::Write`] traits. Example usage is:
+//!
+//! ```
+//! # #![allow(unused_must_use)]
+//! use std::io::Write;
+//! let mut w = Vec::new();
+//! write!(&mut w, "Hello {}!", "world");
+//! ```
+//!
+//! ### `print!`
+//!
+//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
+//! macro, the goal of these macros is to avoid intermediate allocations when
+//! printing output. Example usage is:
+//!
+//! ```
+//! print!("Hello {}!", "world");
+//! println!("I have a newline {}", "character at the end");
+//! ```
+//! ### `eprint!`
+//!
+//! The [`eprint!`] and [`eprintln!`] macros are identical to
+//! [`print!`] and [`println!`], respectively, except they emit their
+//! output to stderr.
+//!
+//! ### `format_args!`
+//!
+//! [`format_args!`] is a curious macro used to safely pass around
+//! an opaque object describing the format string. This object
+//! does not require any heap allocations to create, and it only
+//! references information on the stack. Under the hood, all of
+//! the related macros are implemented in terms of this. First
+//! off, some example usage is:
+//!
+//! ```
+//! # #![allow(unused_must_use)]
+//! use std::fmt;
+//! use std::io::{self, Write};
+//!
+//! let mut some_writer = io::stdout();
+//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
+//!
+//! fn my_fmt_fn(args: fmt::Arguments) {
+//! write!(&mut io::stdout(), "{}", args);
+//! }
+//! my_fmt_fn(format_args!(", or a {} too", "function"));
+//! ```
+//!
+//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
+//! This structure can then be passed to the [`write`] and [`format`] functions
+//! inside this module in order to process the format string.
+//! The goal of this macro is to even further prevent intermediate allocations
+//! when dealing with formatting strings.
+//!
+//! For example, a logging library could use the standard formatting syntax, but
+//! it would internally pass around this structure until it has been determined
+//! where output should go to.
+//!
+//! [`fmt::Result`]: Result "fmt::Result"
+//! [Result]: core::result::Result "std::result::Result"
+//! [std::fmt::Error]: Error "fmt::Error"
+//! [`write`]: write() "fmt::write"
+//! [`to_string`]: crate::string::ToString::to_string "ToString::to_string"
+//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
+//! [`std::io::Write`]: ../../std/io/trait.Write.html
+//! [`std::fmt::Write`]: ../../std/fmt/trait.Write.html
+//! [`print!`]: ../../std/macro.print.html "print!"
+//! [`println!`]: ../../std/macro.println.html "println!"
+//! [`eprint!`]: ../../std/macro.eprint.html "eprint!"
+//! [`eprintln!`]: ../../std/macro.eprintln.html "eprintln!"
+//! [`format_args!`]: ../../std/macro.format_args.html "format_args!"
+//! [`fmt::Arguments`]: Arguments "fmt::Arguments"
+//! [`format`]: format() "fmt::format"
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[unstable(feature = "fmt_internals", issue = "none")]
+pub use core::fmt::rt;
+#[stable(feature = "fmt_flags_align", since = "1.28.0")]
+pub use core::fmt::Alignment;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::Error;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{write, ArgumentV1, Arguments};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Binary, Octal};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Debug, Display};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Formatter, Result, Write};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{LowerExp, UpperExp};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{LowerHex, Pointer, UpperHex};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::string;
+
+/// The `format` function takes an [`Arguments`] struct and returns the resulting
+/// formatted string.
+///
+/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::fmt;
+///
+/// let s = fmt::format(format_args!("Hello, {}!", "world"));
+/// assert_eq!(s, "Hello, world!");
+/// ```
+///
+/// Please note that using [`format!`] might be preferable.
+/// Example:
+///
+/// ```
+/// let s = format!("Hello, {}!", "world");
+/// assert_eq!(s, "Hello, world!");
+/// ```
+///
+/// [`format_args!`]: core::format_args
+/// [`format!`]: crate::format
+#[cfg(not(no_global_oom_handling))]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn format(args: Arguments<'_>) -> string::String {
+ let capacity = args.estimated_capacity();
+ let mut output = string::String::with_capacity(capacity);
+ output.write_fmt(args).expect("a formatting trait implementation returned an error");
+ output
+}
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
new file mode 100644
index 000000000000..03d2ce1df814
--- /dev/null
+++ b/rust/alloc/lib.rs
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! # The Rust core allocation and collections library
+//!
+//! This library provides smart pointers and collections for managing
+//! heap-allocated values.
+//!
+//! This library, like libcore, normally doesn’t need to be used directly
+//! since its contents are re-exported in the [`std` crate](../std/index.html).
+//! Crates that use the `#![no_std]` attribute however will typically
+//! not depend on `std`, so they’d use this crate instead.
+//!
+//! ## Boxed values
+//!
+//! The [`Box`] type is a smart pointer type. There can only be one owner of a
+//! [`Box`], and the owner can decide to mutate the contents, which live on the
+//! heap.
+//!
+//! This type can be sent among threads efficiently as the size of a `Box` value
+//! is the same as that of a pointer. Tree-like data structures are often built
+//! with boxes because each node often has only one owner, the parent.
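+//!
+//! An illustrative sketch (the `Node` type is made up for the example):
+//!
+//! ```
+//! // Example-only type: a singly linked list where each node owns the
+//! // next one through a `Box`.
+//! struct Node {
+//!     value: i32,
+//!     next: Option<Box<Node>>,
+//! }
+//!
+//! let mut head = Box::new(Node { value: 1, next: None });
+//! // The single owner may freely mutate the heap-allocated contents.
+//! head.next = Some(Box::new(Node { value: 2, next: None }));
+//! assert_eq!(head.next.as_ref().map(|n| n.value), Some(2));
+//! ```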
+//!
+//! ## Reference counted pointers
+//!
+//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
+//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
+//! only allows access to `&T`, a shared reference.
+//!
+//! This type is useful when inherited mutability (such as using [`Box`]) is too
+//! constraining for an application, and is often paired with the [`Cell`] or
+//! [`RefCell`] types in order to allow mutation.
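+//!
+//! For illustration, a small sketch of the `Rc`/`RefCell` pairing mentioned
+//! above (written against the `std` re-exports):
+//!
+//! ```
+//! use std::cell::RefCell;
+//! use std::rc::Rc;
+//!
+//! // Two owners share the same heap-allocated value within one thread.
+//! let shared = Rc::new(RefCell::new(0));
+//! let other = Rc::clone(&shared);
+//!
+//! // `RefCell` provides the interior mutability that `Rc` alone does not.
+//! *other.borrow_mut() += 1;
+//! assert_eq!(*shared.borrow(), 1);
+//! assert_eq!(Rc::strong_count(&shared), 2);
+//! ```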
+//!
+//! ## Atomically reference counted pointers
+//!
+//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
+//! provides all the same functionality of [`Rc`], except it requires that the
+//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
+//! sendable while [`Rc<T>`][`Rc`] is not.
+//!
+//! This type allows for shared access to the contained data, and is often
+//! paired with synchronization primitives such as mutexes to allow mutation of
+//! shared resources.
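+//!
+//! A minimal sketch of sharing data across threads (using `std::thread` and
+//! `std::sync::Mutex`, which live outside this crate, for the
+//! synchronization mentioned above):
+//!
+//! ```
+//! use std::sync::{Arc, Mutex};
+//! use std::thread;
+//!
+//! let counter = Arc::new(Mutex::new(0));
+//!
+//! let handles: Vec<_> = (0..4)
+//!     .map(|_| {
+//!         // Each thread gets its own clone of the `Arc`; the data itself
+//!         // is shared and protected by the mutex.
+//!         let counter = Arc::clone(&counter);
+//!         thread::spawn(move || *counter.lock().unwrap() += 1)
+//!     })
+//!     .collect();
+//!
+//! for handle in handles {
+//!     handle.join().unwrap();
+//! }
+//! assert_eq!(*counter.lock().unwrap(), 4);
+//! ```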
+//!
+//! ## Collections
+//!
+//! Implementations of the most common general purpose data structures are
+//! defined in this library. They are re-exported through the
+//! [standard collections library](../std/collections/index.html).
+//!
+//! ## Heap interfaces
+//!
+//! The [`alloc`](alloc/index.html) module defines the low-level interface to the
+//! default global allocator. It is not compatible with the libc allocator API.
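+//!
+//! For illustration, a minimal (and deliberately low-level) sketch of that
+//! interface, here via the `std::alloc` re-exports:
+//!
+//! ```
+//! use std::alloc::{alloc, dealloc, Layout};
+//!
+//! let layout = Layout::new::<u64>();
+//! unsafe {
+//!     // Ask the global allocator for one `u64`-sized, `u64`-aligned block.
+//!     let ptr = alloc(layout);
+//!     assert!(!ptr.is_null());
+//!     ptr.cast::<u64>().write(42);
+//!     assert_eq!(ptr.cast::<u64>().read(), 42);
+//!     // Every allocation must be returned with the same layout.
+//!     dealloc(ptr, layout);
+//! }
+//! ```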
+//!
+//! [`Arc`]: sync
+//! [`Box`]: boxed
+//! [`Cell`]: core::cell
+//! [`Rc`]: rc
+//! [`RefCell`]: core::cell
+
+// To run liballoc tests without x.py and without ending up with two copies of liballoc, Miri needs
+// to be able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
+#![allow(unused_attributes)]
+#![stable(feature = "alloc", since = "1.36.0")]
+#![doc(
+ html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
+ test(no_crate_inject, attr(allow(unused_variables), deny(warnings)))
+)]
+#![doc(cfg_hide(
+ not(test),
+ not(any(test, bootstrap)),
+ any(not(feature = "miri-test-libstd"), test, doctest),
+ no_global_oom_handling,
+ not(no_global_oom_handling),
+ target_has_atomic = "ptr"
+))]
+#![no_std]
+#![needs_allocator]
+//
+// Lints:
+#![deny(unsafe_op_in_unsafe_fn)]
+#![warn(deprecated_in_future)]
+#![warn(missing_debug_implementations)]
+#![warn(missing_docs)]
+#![allow(explicit_outlives_requirements)]
+//
+// Library features:
+#![cfg_attr(not(no_global_oom_handling), feature(alloc_c_string))]
+#![feature(alloc_layout_extra)]
+#![feature(allocator_api)]
+#![feature(array_chunks)]
+#![feature(array_methods)]
+#![feature(array_windows)]
+#![feature(assert_matches)]
+#![feature(async_iterator)]
+#![feature(coerce_unsized)]
+#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![feature(const_box)]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))]
+#![feature(const_cow_is_borrowed)]
+#![feature(const_convert)]
+#![feature(const_size_of_val)]
+#![feature(const_align_of_val)]
+#![feature(const_ptr_read)]
+#![feature(const_maybe_uninit_write)]
+#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_refs_to_cell)]
+#![feature(core_c_str)]
+#![feature(core_intrinsics)]
+#![feature(core_ffi_c)]
+#![feature(const_eval_select)]
+#![feature(const_pin)]
+#![feature(cstr_from_bytes_until_nul)]
+#![feature(dispatch_from_dyn)]
+#![feature(exact_size_is_empty)]
+#![feature(extend_one)]
+#![feature(fmt_internals)]
+#![feature(fn_traits)]
+#![feature(hasher_prefixfree_extras)]
+#![feature(inplace_iteration)]
+#![feature(iter_advance_by)]
+#![feature(layout_for_ptr)]
+#![feature(maybe_uninit_slice)]
+#![cfg_attr(test, feature(new_uninit))]
+#![feature(nonnull_slice_from_raw_parts)]
+#![feature(pattern)]
+#![feature(ptr_internals)]
+#![feature(ptr_metadata)]
+#![feature(ptr_sub_ptr)]
+#![feature(receiver_trait)]
+#![feature(set_ptr_value)]
+#![feature(slice_group_by)]
+#![feature(slice_ptr_get)]
+#![feature(slice_ptr_len)]
+#![feature(slice_range)]
+#![feature(str_internals)]
+#![feature(strict_provenance)]
+#![feature(trusted_len)]
+#![feature(trusted_random_access)]
+#![feature(try_trait_v2)]
+#![feature(unchecked_math)]
+#![feature(unicode_internals)]
+#![feature(unsize)]
+//
+// Language features:
+#![feature(allocator_internals)]
+#![feature(allow_internal_unstable)]
+#![feature(associated_type_bounds)]
+#![feature(box_syntax)]
+#![feature(cfg_sanitize)]
+#![feature(const_deref)]
+#![feature(const_mut_refs)]
+#![feature(const_ptr_write)]
+#![feature(const_precise_live_drops)]
+#![feature(const_trait_impl)]
+#![feature(const_try)]
+#![feature(dropck_eyepatch)]
+#![feature(exclusive_range_pattern)]
+#![feature(fundamental)]
+#![cfg_attr(not(test), feature(generator_trait))]
+#![feature(hashmap_internals)]
+#![feature(lang_items)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(negative_impls)]
+#![feature(never_type)]
+#![feature(nll)] // Not necessary, but here to test the `nll` feature.
+#![feature(rustc_allow_const_fn_unstable)]
+#![feature(rustc_attrs)]
+#![feature(slice_internals)]
+#![feature(staged_api)]
+#![cfg_attr(test, feature(test))]
+#![feature(unboxed_closures)]
+#![feature(unsized_fn_params)]
+#![feature(c_unwind)]
+//
+// Rustdoc features:
+#![feature(doc_cfg)]
+#![feature(doc_cfg_hide)]
+// Technically, this is a bug in rustdoc: rustdoc sees that the documentation on `#[lang = slice_alloc]`
+// blocks is for `&[T]`, which also has documentation using this feature in `core`, and gets mad
+// that the feature-gate isn't enabled. Ideally, it wouldn't check for the feature gate for docs
+// from other crates, but since this can only appear for lang items, it doesn't seem worth fixing.
+#![feature(intra_doc_pointers)]
+
+// Allow testing this library
+#[cfg(test)]
+#[macro_use]
+extern crate std;
+#[cfg(test)]
+extern crate test;
+
+// Module with internal macros used by other modules (needs to be included before other modules).
+#[macro_use]
+mod macros;
+
+mod raw_vec;
+
+// Heaps provided for low-level allocation strategies
+
+pub mod alloc;
+
+// Primitive types using the heaps above
+
+// Need to conditionally define the mod from `boxed.rs` to avoid
+// duplicating the lang-items when building in test cfg; but also need
+// to allow code to have `use boxed::Box;` declarations.
+#[cfg(not(test))]
+pub mod boxed;
+#[cfg(test)]
+mod boxed {
+ pub use std::boxed::Box;
+}
+pub mod borrow;
+pub mod collections;
+#[cfg(not(no_global_oom_handling))]
+pub mod ffi;
+pub mod fmt;
+#[cfg(not(no_rc))]
+pub mod rc;
+pub mod slice;
+pub mod str;
+pub mod string;
+#[cfg(all(not(no_sync), target_has_atomic = "ptr"))]
+pub mod sync;
+#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))]
+pub mod task;
+#[cfg(test)]
+mod tests;
+pub mod vec;
+
+#[doc(hidden)]
+#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
+pub mod __export {
+ pub use core::format_args;
+}
diff --git a/rust/alloc/macros.rs b/rust/alloc/macros.rs
new file mode 100644
index 000000000000..fa7bacfd5847
--- /dev/null
+++ b/rust/alloc/macros.rs
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+/// Creates a [`Vec`] containing the arguments.
+///
+/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
+/// There are two forms of this macro:
+///
+/// - Create a [`Vec`] containing a given list of elements:
+///
+/// ```
+/// let v = vec![1, 2, 3];
+/// assert_eq!(v[0], 1);
+/// assert_eq!(v[1], 2);
+/// assert_eq!(v[2], 3);
+/// ```
+///
+/// - Create a [`Vec`] from a given element and size:
+///
+/// ```
+/// let v = vec![1; 3];
+/// assert_eq!(v, [1, 1, 1]);
+/// ```
+///
+/// Note that unlike array expressions this syntax supports all elements
+/// which implement [`Clone`] and the number of elements doesn't have to be
+/// a constant.
+///
+/// This will use `clone` to duplicate an expression, so one should be careful
+/// using this with types having a nonstandard `Clone` implementation. For
+/// example, `vec![Rc::new(1); 5]` will create a vector of five references
+/// to the same boxed integer value, not five references pointing to independently
+/// boxed integers.
+///
+/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
+/// [`Vec`]: crate::vec::Vec
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "vec_macro"]
+#[allow_internal_unstable(box_syntax, liballoc_internals)]
+macro_rules! vec {
+ () => (
+ $crate::__rust_force_expr!($crate::vec::Vec::new())
+ );
+ ($elem:expr; $n:expr) => (
+ $crate::__rust_force_expr!($crate::vec::from_elem($elem, $n))
+ );
+ ($($x:expr),+ $(,)?) => (
+ $crate::__rust_force_expr!(<[_]>::into_vec(box [$($x),+]))
+ );
+}
+
+// HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is
+// required for this macro definition, is not available. Instead use the
+// `slice::into_vec` function which is only available with cfg(test)
+// NB see the slice::hack module in slice.rs for more information
+#[cfg(all(not(no_global_oom_handling), test))]
+#[cfg_attr(not(bootstrap), allow(unused_macro_rules))]
+macro_rules! vec {
+ () => (
+ $crate::vec::Vec::new()
+ );
+ ($elem:expr; $n:expr) => (
+ $crate::vec::from_elem($elem, $n)
+ );
+ ($($x:expr),*) => (
+ $crate::slice::into_vec(box [$($x),*])
+ );
+ ($($x:expr,)*) => (vec![$($x),*])
+}
+
+/// Creates a `String` using interpolation of runtime expressions.
+///
+/// The first argument `format!` receives is a format string. This must be a string
+/// literal. The power of the formatting string is in the `{}`s contained.
+///
+/// Additional parameters passed to `format!` replace the `{}`s within the
+/// formatting string in the order given unless named or positional parameters
+/// are used; see [`std::fmt`] for more information.
+///
+/// A common use for `format!` is concatenation and interpolation of strings.
+/// The same convention is used with [`print!`] and [`write!`] macros,
+/// depending on the intended destination of the string.
+///
+/// To convert a single value to a string, use the [`to_string`] method. This
+/// will use the [`Display`] formatting trait.
+///
+/// [`std::fmt`]: ../std/fmt/index.html
+/// [`print!`]: ../std/macro.print.html
+/// [`write!`]: core::write
+/// [`to_string`]: crate::string::ToString
+/// [`Display`]: core::fmt::Display
+///
+/// # Panics
+///
+/// `format!` panics if a formatting trait implementation returns an error.
+/// This indicates an incorrect implementation
+/// since `fmt::Write for String` never returns an error itself.
+///
+/// # Examples
+///
+/// ```
+/// format!("test");
+/// format!("hello {}", "world!");
+/// format!("x = {}, y = {y}", 10, y = 30);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "format_macro")]
+macro_rules! format {
+ ($($arg:tt)*) => {{
+ let res = $crate::fmt::format($crate::__export::format_args!($($arg)*));
+ res
+ }}
+}
+
+/// Force AST node to an expression to improve diagnostics in pattern position.
+#[doc(hidden)]
+#[macro_export]
+#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
+macro_rules! __rust_force_expr {
+ ($e:expr) => {
+ $e
+ };
+}
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
new file mode 100644
index 000000000000..59e353bfe5d3
--- /dev/null
+++ b/rust/alloc/raw_vec.rs
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
+
+use core::alloc::LayoutError;
+use core::cmp;
+use core::intrinsics;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::Drop;
+use core::ptr::{self, NonNull, Unique};
+use core::slice;
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+use crate::alloc::{Allocator, Global, Layout};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::collections::TryReserveErrorKind::*;
+
+#[cfg(test)]
+mod tests;
+
+enum AllocInit {
+ /// The contents of the new memory are uninitialized.
+ Uninitialized,
+ #[allow(dead_code)]
+ /// The new memory is guaranteed to be zeroed.
+ Zeroed,
+}
+
+/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
+/// a buffer of memory on the heap without having to worry about all the corner cases
+/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
+/// In particular:
+///
+/// * Produces `Unique::dangling()` on zero-sized types.
+/// * Produces `Unique::dangling()` on zero-length allocations.
+/// * Avoids freeing `Unique::dangling()`.
+/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
+/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
+/// * Guards against overflowing your length.
+/// * Calls `handle_alloc_error` for fallible allocations.
+/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
+/// * Uses the excess returned from the allocator to use the largest available capacity.
+///
+/// This type does not in any way inspect the memory that it manages. When dropped, it *will*
+/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
+/// to handle the actual things *stored* inside of a `RawVec`.
+///
+/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
+/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
+/// `Box<[T]>`, since `capacity()` won't yield the length.
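+///
+/// An illustrative (crate-internal) sketch of typical use, mirroring what
+/// `Vec` does under the hood:
+///
+/// ```ignore (internal-api)
+/// let mut buf: RawVec<u32> = RawVec::new();   // no allocation yet
+/// buf.reserve(0, 16);                         // room for at least 16 elements
+/// assert!(buf.capacity() >= 16);
+/// // The caller tracks the initialized length and drops the contents;
+/// // dropping the `RawVec` itself only frees the backing memory.
+/// ```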
+#[allow(missing_debug_implementations)]
+pub(crate) struct RawVec<T, A: Allocator = Global> {
+ ptr: Unique<T>,
+ cap: usize,
+ alloc: A,
+}
+
+impl<T> RawVec<T, Global> {
+ /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
+ /// they cannot call `Self::new()`.
+ ///
+ /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
+ /// that would truly const-call something unstable.
+ pub const NEW: Self = Self::new();
+
+ /// Creates the biggest possible `RawVec` (on the system heap)
+ /// without allocating. If `T` has positive size, then this makes a
+ /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
+ /// `RawVec` with capacity `usize::MAX`. Useful for implementing
+ /// delayed allocation.
+ #[must_use]
+ pub const fn new() -> Self {
+ Self::new_in(Global)
+ }
+
+ /// Creates a `RawVec` (on the system heap) with exactly the
+ /// capacity and alignment requirements for a `[T; capacity]`. This is
+ /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
+ /// zero-sized. Note that if `T` is zero-sized this means you will
+ /// *not* get a `RawVec` with the requested capacity.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the requested capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(any(no_global_oom_handling, test)))]
+ #[must_use]
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::with_capacity_in(capacity, Global)
+ }
+
+ /// Like `with_capacity`, but guarantees the buffer is zeroed.
+ #[cfg(not(any(no_global_oom_handling, test)))]
+ #[must_use]
+ #[inline]
+ pub fn with_capacity_zeroed(capacity: usize) -> Self {
+ Self::with_capacity_zeroed_in(capacity, Global)
+ }
+}
+
+impl<T, A: Allocator> RawVec<T, A> {
+ // Tiny Vecs are dumb. Skip to:
+ // - 8 if the element size is 1, because any heap allocator is likely
+ // to round up a request of less than 8 bytes to at least 8 bytes.
+ // - 4 if elements are moderate-sized (<= 1 KiB).
+ // - 1 otherwise, to avoid wasting too much space for very short Vecs.
+ pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
+ 8
+ } else if mem::size_of::<T>() <= 1024 {
+ 4
+ } else {
+ 1
+ };
+
+ /// Like `new`, but parameterized over the choice of allocator for
+ /// the returned `RawVec`.
+ pub const fn new_in(alloc: A) -> Self {
+ // `cap: 0` means "unallocated". zero-sized types are ignored.
+ Self { ptr: Unique::dangling(), cap: 0, alloc }
+ }
+
+ /// Like `with_capacity`, but parameterized over the choice of
+ /// allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
+ }
+
+ /// Like `try_with_capacity`, but parameterized over the choice of
+ /// allocator for the returned `RawVec`.
+ #[inline]
+ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc)
+ }
+
+ /// Like `with_capacity_zeroed`, but parameterized over the choice
+ /// of allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
+ }
+
+ /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
+ ///
+ /// Note that this will correctly reconstitute any `cap` changes
+ /// that may have been performed. (See description of type for details.)
+ ///
+ /// # Safety
+ ///
+ /// * `len` must be greater than or equal to the most recently requested capacity, and
+ /// * `len` must be less than or equal to `self.capacity()`.
+ ///
+ /// Note that the requested capacity and `self.capacity()` could differ, as
+ /// an allocator could overallocate and return a greater memory block than requested.
+ pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
+ // Sanity-check one half of the safety requirement (we cannot check the other half).
+ debug_assert!(
+ len <= self.capacity(),
+ "`len` must be smaller than or equal to `self.capacity()`"
+ );
+
+ let me = ManuallyDrop::new(self);
+ unsafe {
+ let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
+ Box::from_raw_in(slice, ptr::read(&me.alloc))
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
+ // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
+ if mem::size_of::<T>() == 0 || capacity == 0 {
+ Self::new_in(alloc)
+ } else {
+ // We avoid `unwrap_or_else` here because it bloats the amount of
+ // LLVM IR generated.
+ let layout = match Layout::array::<T>(capacity) {
+ Ok(layout) => layout,
+ Err(_) => capacity_overflow(),
+ };
+ match alloc_guard(layout.size()) {
+ Ok(_) => {}
+ Err(_) => capacity_overflow(),
+ }
+ let result = match init {
+ AllocInit::Uninitialized => alloc.allocate(layout),
+ AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+ };
+ let ptr = match result {
+ Ok(ptr) => ptr,
+ Err(_) => handle_alloc_error(layout),
+ };
+
+ // Allocators currently return a `NonNull<[u8]>` whose length
+ // matches the size requested. If that ever changes, the capacity
+ // here should change to `ptr.len() / mem::size_of::<T>()`.
+ Self {
+ ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
+ cap: capacity,
+ alloc,
+ }
+ }
+ }
+
+ fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
+ // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
+ if mem::size_of::<T>() == 0 || capacity == 0 {
+ return Ok(Self::new_in(alloc));
+ }
+
+ let layout = Layout::array::<T>(capacity).map_err(|_| CapacityOverflow)?;
+ alloc_guard(layout.size())?;
+ let result = match init {
+ AllocInit::Uninitialized => alloc.allocate(layout),
+ AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+ };
+ let ptr = result.map_err(|_| AllocError { layout, non_exhaustive: () })?;
+
+ // Allocators currently return a `NonNull<[u8]>` whose length
+ // matches the size requested. If that ever changes, the capacity
+ // here should change to `ptr.len() / mem::size_of::<T>()`.
+ Ok(Self {
+ ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
+ cap: capacity,
+ alloc,
+ })
+ }
+
+ /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
+ ///
+ /// # Safety
+ ///
+ /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
+ /// `capacity`.
+ /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
+ /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+ /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
+ /// guaranteed.
+ #[inline]
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
+ Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
+ }
+
+ /// Gets a raw pointer to the start of the allocation. Note that this is
+ /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
+ /// be careful.
+ #[inline]
+ pub fn ptr(&self) -> *mut T {
+ self.ptr.as_ptr()
+ }
+
+ /// Gets the capacity of the allocation.
+ ///
+ /// This will always be `usize::MAX` if `T` is zero-sized.
+ #[inline(always)]
+ pub fn capacity(&self) -> usize {
+ if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+ }
+
+ /// Returns a shared reference to the allocator backing this `RawVec`.
+ pub fn allocator(&self) -> &A {
+ &self.alloc
+ }
+
+ fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
+ if mem::size_of::<T>() == 0 || self.cap == 0 {
+ None
+ } else {
+ // We have an allocated chunk of memory, so we can bypass runtime
+ // checks to get our current layout.
+ unsafe {
+ let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
+ Some((self.ptr.cast().into(), layout))
+ }
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already have enough capacity, will
+ /// reallocate enough space plus comfortable slack space to get amortized
+ /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
+ /// itself to panic.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe
+ /// code *you* write that relies on the behavior of this function may break.
+ ///
+ /// This is ideal for implementing a bulk-push operation like `extend`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn reserve(&mut self, len: usize, additional: usize) {
+ // Callers expect this function to be very cheap when there is already sufficient capacity.
+ // Therefore, we move all the resizing and error-handling logic from grow_amortized and
+ // handle_reserve behind a call, while making sure that this function is likely to be
+ // inlined as just a comparison and a call if the comparison fails.
+ #[cold]
+ fn do_reserve_and_handle<T, A: Allocator>(
+ slf: &mut RawVec<T, A>,
+ len: usize,
+ additional: usize,
+ ) {
+ handle_reserve(slf.grow_amortized(len, additional));
+ }
+
+ if self.needs_to_grow(len, additional) {
+ do_reserve_and_handle(self, len, additional);
+ }
+ }
+
+ /// A specialized version of `reserve()` used only by the hot and
+ /// oft-instantiated `Vec::push()`, which does its own capacity check.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(never)]
+ pub fn reserve_for_push(&mut self, len: usize) {
+ handle_reserve(self.grow_amortized(len, 1));
+ }
+
+ /// The same as `reserve`, but returns on errors instead of panicking or aborting.
+ pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) {
+ self.grow_amortized(len, additional)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// The same as `reserve_for_push`, but returns on errors instead of panicking or aborting.
+ #[inline(never)]
+ pub fn try_reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> {
+ self.grow_amortized(len, 1)
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already, will reallocate the
+ /// minimum possible amount of memory necessary. Generally this will be
+ /// exactly the amount of memory necessary, but in principle the allocator
+ /// is free to give back more than we asked for.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe code
+ /// *you* write that relies on the behavior of this function may break.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ pub fn reserve_exact(&mut self, len: usize, additional: usize) {
+ handle_reserve(self.try_reserve_exact(len, additional));
+ }
+
+ /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
+ pub fn try_reserve_exact(
+ &mut self,
+ len: usize,
+ additional: usize,
+ ) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
+ }
+
+ /// Shrinks the buffer down to the specified capacity. If the given amount
+ /// is 0, actually completely deallocates.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the given amount is *larger* than the current capacity.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ pub fn shrink_to_fit(&mut self, cap: usize) {
+ handle_reserve(self.shrink(cap));
+ }
+
+ /// Tries to shrink the buffer down to the specified capacity. If the given amount
+ /// is 0, actually completely deallocates.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the given amount is *larger* than the current capacity.
+ pub fn try_shrink_to_fit(&mut self, cap: usize) -> Result<(), TryReserveError> {
+ self.shrink(cap)
+ }
+}
+
+impl<T, A: Allocator> RawVec<T, A> {
+ /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
+ /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
+ fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
+ additional > self.capacity().wrapping_sub(len)
+ }
+
+ fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+ // Allocators currently return a `NonNull<[u8]>` whose length matches
+ // the size requested. If that ever changes, the capacity here should
+ // change to `ptr.len() / mem::size_of::<T>()`.
+ self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
+ self.cap = cap;
+ }
+
+ // This method is usually instantiated many times. So we want it to be as
+ // small as possible, to improve compile times. But we also want as much of
+ // its contents to be statically computable as possible, to make the
+ // generated code run faster. Therefore, this method is carefully written
+ // so that all of the code that depends on `T` is within it, while as much
+ // of the code that doesn't depend on `T` as possible is in functions that
+ // are non-generic over `T`.
+ fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ // This is ensured by the calling contexts.
+ debug_assert!(additional > 0);
+
+ if mem::size_of::<T>() == 0 {
+ // Since we return a capacity of `usize::MAX` when `elem_size` is
+ // 0, getting to here necessarily means the `RawVec` is overfull.
+ return Err(CapacityOverflow.into());
+ }
+
+ // Nothing we can really do about these checks, sadly.
+ let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+
+ // This guarantees exponential growth. The doubling cannot overflow
+ // because `cap <= isize::MAX` and the type of `cap` is `usize`.
+ let cap = cmp::max(self.cap * 2, required_cap);
+ let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
+
+ let new_layout = Layout::array::<T>(cap);
+
+ // `finish_grow` is non-generic over `T`.
+ let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
+
+ // The constraints on this method are much the same as those on
+ // `grow_amortized`, but this method is usually instantiated less often so
+ // it's less critical.
+ fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ if mem::size_of::<T>() == 0 {
+ // Since we return a capacity of `usize::MAX` when the type size is
+ // 0, getting to here necessarily means the `RawVec` is overfull.
+ return Err(CapacityOverflow.into());
+ }
+
+ let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+ let new_layout = Layout::array::<T>(cap);
+
+ // `finish_grow` is non-generic over `T`.
+ let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
+
+ fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
+ assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
+
+ let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
+
+ let ptr = unsafe {
+ // `Layout::array` cannot overflow here because it would have
+ // overflowed earlier when capacity was larger.
+ let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
+ self.alloc
+ .shrink(ptr, layout, new_layout)
+ .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+ };
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
+}
+
+// This function is outside `RawVec` to minimize compile times. See the comment
+// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
+// significant, because the number of different `A` types seen in practice is
+// much smaller than the number of `T` types.)
+#[inline(never)]
+fn finish_grow<A>(
+ new_layout: Result<Layout, LayoutError>,
+ current_memory: Option<(NonNull<u8>, Layout)>,
+ alloc: &mut A,
+) -> Result<NonNull<[u8]>, TryReserveError>
+where
+ A: Allocator,
+{
+ // Check for the error here to minimize the size of `RawVec::grow_*`.
+ let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
+
+ alloc_guard(new_layout.size())?;
+
+ let memory = if let Some((ptr, old_layout)) = current_memory {
+ debug_assert_eq!(old_layout.align(), new_layout.align());
+ unsafe {
+ // The allocator checks for alignment equality
+ intrinsics::assume(old_layout.align() == new_layout.align());
+ alloc.grow(ptr, old_layout, new_layout)
+ }
+ } else {
+ alloc.allocate(new_layout)
+ };
+
+ memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
+}
+
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
+ /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
+ fn drop(&mut self) {
+ if let Some((ptr, layout)) = self.current_memory() {
+ unsafe { self.alloc.deallocate(ptr, layout) }
+ }
+ }
+}
+
+// Central function for reserve error handling.
+#[cfg(not(no_global_oom_handling))]
+#[inline]
+fn handle_reserve(result: Result<(), TryReserveError>) {
+ match result.map_err(|e| e.kind()) {
+ Err(CapacityOverflow) => capacity_overflow(),
+ Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+ Ok(()) => { /* yay */ }
+ }
+}
+
+// We need to guarantee the following:
+// * We don't ever allocate `> isize::MAX` byte-size objects.
+// * We don't overflow `usize::MAX` and actually allocate too little.
+//
+// On 64-bit we just need to check for overflow since trying to allocate
+// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
+// an extra guard for this in case we're running on a platform which can use
+// all 4GB in user-space, e.g., PAE or x32.
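+//
+// For example, on a 32-bit target `usize::MAX` is about 4 GiB while
+// `isize::MAX` is about 2 GiB, so a 3 GiB request fits in a `usize` without
+// overflowing but must still be rejected by `alloc_guard`.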
+
+#[inline]
+fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
+ if usize::BITS < 64 && alloc_size > isize::MAX as usize {
+ Err(CapacityOverflow.into())
+ } else {
+ Ok(())
+ }
+}
+
+// One central function responsible for reporting capacity overflows. This'll
+// ensure that the code generation related to these panics is minimal as there's
+// only one location which panics rather than a bunch throughout the module.
+#[cfg(not(no_global_oom_handling))]
+fn capacity_overflow() -> ! {
+ panic!("capacity overflow");
+}
diff --git a/rust/alloc/slice.rs b/rust/alloc/slice.rs
new file mode 100644
index 000000000000..d53f6051f3a8
--- /dev/null
+++ b/rust/alloc/slice.rs
@@ -0,0 +1,1295 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A dynamically-sized view into a contiguous sequence, `[T]`.
+//!
+//! *[See also the slice primitive type](slice).*
+//!
+//! Slices are a view into a block of memory represented as a pointer and a
+//! length.
+//!
+//! ```
+//! // slicing a Vec
+//! let vec = vec![1, 2, 3];
+//! let int_slice = &vec[..];
+//! // coercing an array to a slice
+//! let str_slice: &[&str] = &["one", "two", "three"];
+//! ```
+//!
+//! Slices are either mutable or shared. The shared slice type is `&[T]`,
+//! while the mutable slice type is `&mut [T]`, where `T` represents the element
+//! type. For example, you can mutate the block of memory that a mutable slice
+//! points to:
+//!
+//! ```
+//! let x = &mut [1, 2, 3];
+//! x[1] = 7;
+//! assert_eq!(x, &[1, 7, 3]);
+//! ```
+//!
+//! Here are some of the things this module contains:
+//!
+//! ## Structs
+//!
+//! There are several structs that are useful for slices, such as [`Iter`], which
+//! represents iteration over a slice.
+//!
+//! ## Trait Implementations
+//!
+//! There are several implementations of common traits for slices. Some examples
+//! include:
+//!
+//! * [`Clone`]
+//! * [`Eq`], [`Ord`] - for slices whose element type is [`Eq`] or [`Ord`].
+//! * [`Hash`] - for slices whose element type is [`Hash`].
+//!
+//! ## Iteration
+//!
+//! Slices implement `IntoIterator`. The iterator yields references to the
+//! slice elements.
+//!
+//! ```
+//! let numbers = &[0, 1, 2];
+//! for n in numbers {
+//! println!("{n} is a number!");
+//! }
+//! ```
+//!
+//! The mutable slice yields mutable references to the elements:
+//!
+//! ```
+//! let mut scores = [7, 8, 9];
+//! for score in &mut scores[..] {
+//! *score += 1;
+//! }
+//! ```
+//!
+//! This iterator yields mutable references to the slice's elements, so while
+//! the element type of the slice is `i32`, the element type of the iterator is
+//! `&mut i32`.
+//!
+//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
+//! iterators.
+//! * Further methods that return iterators are [`.split`], [`.splitn`],
+//! [`.chunks`], [`.windows`] and more.
+//!
+//! [`Hash`]: core::hash::Hash
+//! [`.iter`]: slice::iter
+//! [`.iter_mut`]: slice::iter_mut
+//! [`.split`]: slice::split
+//! [`.splitn`]: slice::splitn
+//! [`.chunks`]: slice::chunks
+//! [`.windows`]: slice::windows
+#![stable(feature = "rust1", since = "1.0.0")]
+// Many of the usings in this module are only used in the test configuration.
+// It's cleaner to just turn off the unused_imports warning than to fix them.
+#![cfg_attr(test, allow(unused_imports, dead_code))]
+
+use core::borrow::{Borrow, BorrowMut};
+#[cfg(not(no_global_oom_handling))]
+use core::cmp::Ordering::{self, Less};
+#[cfg(not(no_global_oom_handling))]
+use core::mem;
+#[cfg(not(no_global_oom_handling))]
+use core::mem::size_of;
+#[cfg(not(no_global_oom_handling))]
+use core::ptr;
+
+use crate::alloc::Allocator;
+use crate::alloc::Global;
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::ToOwned;
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::vec::Vec;
+
+#[unstable(feature = "slice_range", issue = "76393")]
+pub use core::slice::range;
+#[unstable(feature = "array_chunks", issue = "74985")]
+pub use core::slice::ArrayChunks;
+#[unstable(feature = "array_chunks", issue = "74985")]
+pub use core::slice::ArrayChunksMut;
+#[unstable(feature = "array_windows", issue = "75027")]
+pub use core::slice::ArrayWindows;
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+pub use core::slice::EscapeAscii;
+#[stable(feature = "slice_get_slice", since = "1.28.0")]
+pub use core::slice::SliceIndex;
+#[stable(feature = "from_ref", since = "1.28.0")]
+pub use core::slice::{from_mut, from_ref};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{from_raw_parts, from_raw_parts_mut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{Chunks, Windows};
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+pub use core::slice::{ChunksExact, ChunksExactMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{ChunksMut, Split, SplitMut};
+#[unstable(feature = "slice_group_by", issue = "80552")]
+pub use core::slice::{GroupBy, GroupByMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{Iter, IterMut};
+#[stable(feature = "rchunks", since = "1.31.0")]
+pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+pub use core::slice::{RSplit, RSplitMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+pub use core::slice::{SplitInclusive, SplitInclusiveMut};
+
+////////////////////////////////////////////////////////////////////////////////
+// Basic slice extension methods
+////////////////////////////////////////////////////////////////////////////////
+
+// HACK(japaric) needed for the implementation of `vec!` macro during testing
+// N.B., see the `hack` module in this file for more details.
+#[cfg(test)]
+pub use hack::into_vec;
+
+// HACK(japaric) needed for the implementation of `Vec::clone` during testing
+// N.B., see the `hack` module in this file for more details.
+#[cfg(test)]
+pub use hack::to_vec;
+
+// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
+// functions are actually methods that are in `impl [T]` but not in
+// `core::slice::SliceExt` - we need to supply these functions for the
+// `test_permutations` test
+pub(crate) mod hack {
+ use core::alloc::Allocator;
+
+ use crate::boxed::Box;
+ use crate::collections::TryReserveError;
+ use crate::vec::Vec;
+
+ // We shouldn't add the inline attribute to this since it is mostly used by
+ // the `vec!` macro, and inlining it causes a perf regression. See #71204
+ // for discussion and perf results.
+ pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
+ unsafe {
+ let len = b.len();
+ let (b, alloc) = Box::into_raw_with_allocator(b);
+ Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
+ T::to_vec(s, alloc)
+ }
+
+ #[inline]
+ pub fn try_to_vec<T: TryConvertVec, A: Allocator>(s: &[T], alloc: A) -> Result<Vec<T, A>, TryReserveError> {
+ T::try_to_vec(s, alloc)
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ pub trait ConvertVec {
+ fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
+ where
+ Self: Sized;
+ }
+
+ pub trait TryConvertVec {
+ fn try_to_vec<A: Allocator>(s: &[Self], alloc: A) -> Result<Vec<Self, A>, TryReserveError>
+ where
+ Self: Sized;
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ impl<T: Clone> ConvertVec for T {
+ #[inline]
+ default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
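+ // If one of the `clone()` calls below panics, this guard truncates the
+ // partially-filled vector to the number of elements that were fully
+ // written, so only initialized elements are dropped.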
+ struct DropGuard<'a, T, A: Allocator> {
+ vec: &'a mut Vec<T, A>,
+ num_init: usize,
+ }
+ impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY:
+ // items were marked initialized in the loop below
+ unsafe {
+ self.vec.set_len(self.num_init);
+ }
+ }
+ }
+ let mut vec = Vec::with_capacity_in(s.len(), alloc);
+ let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
+ let slots = guard.vec.spare_capacity_mut();
+ // .take(slots.len()) is necessary for LLVM to remove bounds checks
+ // and has better codegen than zip.
+ for (i, b) in s.iter().enumerate().take(slots.len()) {
+ guard.num_init = i;
+ slots[i].write(b.clone());
+ }
+ core::mem::forget(guard);
+ // SAFETY:
+ // the vec was allocated and initialized above to at least this length.
+ unsafe {
+ vec.set_len(s.len());
+ }
+ vec
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ impl<T: Copy> ConvertVec for T {
+ #[inline]
+ fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
+ let mut v = Vec::with_capacity_in(s.len(), alloc);
+ // SAFETY:
+ // allocated above with the capacity of `s`, and initialized to `s.len()` by
+ // the `copy_to_nonoverlapping` below.
+ unsafe {
+ s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
+ v.set_len(s.len());
+ }
+ v
+ }
+ }
+
+ impl<T: Clone> TryConvertVec for T {
+ #[inline]
+ default fn try_to_vec<A: Allocator>(s: &[Self], alloc: A) -> Result<Vec<Self, A>, TryReserveError> {
+ struct DropGuard<'a, T, A: Allocator> {
+ vec: &'a mut Vec<T, A>,
+ num_init: usize,
+ }
+ impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY:
+ // items were marked initialized in the loop below
+ unsafe {
+ self.vec.set_len(self.num_init);
+ }
+ }
+ }
+ let mut vec = Vec::try_with_capacity_in(s.len(), alloc)?;
+ let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
+ let slots = guard.vec.spare_capacity_mut();
+ // .take(slots.len()) is necessary for LLVM to remove bounds checks
+ // and has better codegen than zip.
+ for (i, b) in s.iter().enumerate().take(slots.len()) {
+ guard.num_init = i;
+ slots[i].write(b.clone());
+ }
+ core::mem::forget(guard);
+ // SAFETY:
+ // the vec was allocated and initialized above to at least this length.
+ unsafe {
+ vec.set_len(s.len());
+ }
+ Ok(vec)
+ }
+ }
+}
+
+#[cfg(not(test))]
+impl<T> [T] {
+ /// Sorts the slice.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable`](slice::sort_unstable).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5, 4, 1, -3, 2];
+ ///
+ /// v.sort();
+ /// assert!(v == [-5, -3, 1, 2, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sort(&mut self)
+ where
+ T: Ord,
+ {
+ merge_sort(self, |a, b| a.lt(b));
+ }
+
+ /// Sorts the slice with a comparator function.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// The comparator function must define a total ordering for the elements in the slice. If
+ /// the ordering is not total, the order of the elements is unspecified. An order is a
+ /// total order if it is (for all `a`, `b` and `c`):
+ ///
+ /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
+ /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+ ///
+ /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
+ /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
+ ///
+ /// ```
+ /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+ /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
+ /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
+ /// ```
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable_by`](slice::sort_unstable_by).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [5, 4, 1, 3, 2];
+ /// v.sort_by(|a, b| a.cmp(b));
+ /// assert!(v == [1, 2, 3, 4, 5]);
+ ///
+ /// // reverse sorting
+ /// v.sort_by(|a, b| b.cmp(a));
+ /// assert!(v == [5, 4, 3, 2, 1]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sort_by<F>(&mut self, mut compare: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ merge_sort(self, |a, b| compare(a, b) == Less);
+ }
+
+ /// Sorts the slice with a key extraction function.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
+ /// worst-case, where the key function is *O*(*m*).
+ ///
+ /// For expensive key functions (e.g. functions that are not simple property accesses or
+ /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
+ /// significantly faster, as it does not recompute element keys.
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// v.sort_by_key(|k| k.abs());
+ /// assert!(v == [1, 2, -3, 4, -5]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
+ #[inline]
+ pub fn sort_by_key<K, F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ merge_sort(self, |a, b| f(a).lt(&f(b)));
+ }
+
+ /// Sorts the slice with a key extraction function.
+ ///
+ /// During sorting, the key function is called at most once per element, by using
+ /// temporary storage to remember the results of key evaluation.
+ /// The order of calls to the key function is unspecified and may change in future versions
+ /// of the standard library.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
+ /// worst-case, where the key function is *O*(*m*).
+ ///
+ /// For simple key functions (e.g., functions that are property accesses or
+ /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
+ /// faster.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
+ /// length of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 32, -3, 2];
+ ///
+ /// v.sort_by_cached_key(|k| k.to_string());
+ /// assert!(v == [-3, -5, 2, 32, 4]);
+ /// ```
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
+ #[inline]
+ pub fn sort_by_cached_key<K, F>(&mut self, f: F)
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
+ macro_rules! sort_by_key {
+ ($t:ty, $slice:ident, $f:ident) => {{
+ let mut indices: Vec<_> =
+ $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
+ // The elements of `indices` are unique, as they are indexed, so any sort will be
+ // stable with respect to the original slice. We use `sort_unstable` here because
+ // it requires less memory allocation.
+ indices.sort_unstable();
+ for i in 0..$slice.len() {
+ let mut index = indices[i].1;
+ while (index as usize) < i {
+ index = indices[index as usize].1;
+ }
+ indices[i].1 = index;
+ $slice.swap(i, index as usize);
+ }
+ }};
+ }
+
+ let sz_u8 = mem::size_of::<(K, u8)>();
+ let sz_u16 = mem::size_of::<(K, u16)>();
+ let sz_u32 = mem::size_of::<(K, u32)>();
+ let sz_usize = mem::size_of::<(K, usize)>();
+
+ let len = self.len();
+ if len < 2 {
+ return;
+ }
+ if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
+ return sort_by_key!(u8, self, f);
+ }
+ if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
+ return sort_by_key!(u16, self, f);
+ }
+ if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
+ return sort_by_key!(u32, self, f);
+ }
+ sort_by_key!(usize, self, f)
+ }
+
+ /// Copies `self` into a new `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = [10, 40, 30];
+ /// let x = s.to_vec();
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[rustc_conversion_suggestion]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_vec(&self) -> Vec<T>
+ where
+ T: Clone,
+ {
+ self.to_vec_in(Global)
+ }
+
+ /// Tries to copy `self` into a new `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = [10, 40, 30];
+ /// let x = s.try_to_vec().unwrap();
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_to_vec(&self) -> Result<Vec<T>, TryReserveError>
+ where
+ T: Clone,
+ {
+ self.try_to_vec_in(Global)
+ }
+
+ /// Copies `self` into a new `Vec` with an allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let s = [10, 40, 30];
+ /// let x = s.to_vec_in(System);
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
+ where
+ T: Clone,
+ {
+ // N.B., see the `hack` module in this file for more details.
+ hack::to_vec(self, alloc)
+ }
+
+ /// Tries to copy `self` into a new `Vec` with an allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let s = [10, 40, 30];
+ /// let x = s.try_to_vec_in(System).unwrap();
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_to_vec_in<A: Allocator>(&self, alloc: A) -> Result<Vec<T, A>, TryReserveError>
+ where
+ T: Clone,
+ {
+ // N.B., see the `hack` module in this file for more details.
+ hack::try_to_vec(self, alloc)
+ }
+
+ /// Converts `self` into a vector without clones or allocation.
+ ///
+ /// The resulting vector can be converted back into a box via
+ /// `Vec<T>`'s `into_boxed_slice` method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s: Box<[i32]> = Box::new([10, 40, 30]);
+ /// let x = s.into_vec();
+ /// // `s` cannot be used anymore because it has been converted into `x`.
+ ///
+ /// assert_eq!(x, vec![10, 40, 30]);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
+ // N.B., see the `hack` module in this file for more details.
+ hack::into_vec(self)
+ }
+
+ /// Creates a vector by repeating a slice `n` times.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the capacity would overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
+ /// ```
+ ///
+ /// A panic upon overflow:
+ ///
+ /// ```should_panic
+ /// // this will panic at runtime
+ /// b"0123456789abcdef".repeat(usize::MAX);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
+ pub fn repeat(&self, n: usize) -> Vec<T>
+ where
+ T: Copy,
+ {
+ if n == 0 {
+ return Vec::new();
+ }
+
+ // If `n` is larger than zero, it can be split as
+ // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
+ // `2^expn` is the number represented by the leftmost '1' bit of `n`,
+ // and `rem` is the remaining part of `n`.
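+ //
+ // For example, with `n == 6` (`0b110`): `buf` starts with one copy of
+ // `self`, is doubled twice (1 -> 2 -> 4 repetitions) in the loop below, and
+ // the remaining `rem = 2` repetitions are then copied from the front of
+ // `buf` in the final step.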
+
+ // Using `Vec` to access `set_len()`.
+ let capacity = self.len().checked_mul(n).expect("capacity overflow");
+ let mut buf = Vec::with_capacity(capacity);
+
+ // `2^expn` repetition is done by doubling `buf` `expn`-times.
+ buf.extend(self);
+ {
+ let mut m = n >> 1;
+ // If `m > 0`, there are remaining bits up to the leftmost '1'.
+ while m > 0 {
+ // `buf.extend(buf)`:
+ unsafe {
+ ptr::copy_nonoverlapping(
+ buf.as_ptr(),
+ (buf.as_mut_ptr() as *mut T).add(buf.len()),
+ buf.len(),
+ );
+ // `buf` has capacity of `self.len() * n`.
+ let buf_len = buf.len();
+ buf.set_len(buf_len * 2);
+ }
+
+ m >>= 1;
+ }
+ }
+
+ // `rem` (`= n - 2^expn`) repetition is done by copying
+ // first `rem` repetitions from `buf` itself.
+ let rem_len = capacity - buf.len(); // `self.len() * rem`
+ if rem_len > 0 {
+ // `buf.extend(buf[0 .. rem_len])`:
+ unsafe {
+ // This is non-overlapping since `2^expn > rem`.
+ ptr::copy_nonoverlapping(
+ buf.as_ptr(),
+ (buf.as_mut_ptr() as *mut T).add(buf.len()),
+ rem_len,
+ );
+ // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
+ buf.set_len(capacity);
+ }
+ }
+ buf
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(["hello", "world"].concat(), "helloworld");
+ /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
+ where
+ Self: Concat<Item>,
+ {
+ Concat::concat(self)
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+ /// given separator between each.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(["hello", "world"].join(" "), "hello world");
+ /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
+ /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
+ pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
+ where
+ Self: Join<Separator>,
+ {
+ Join::join(self, sep)
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+ /// given separator between each.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(deprecated)]
+ /// assert_eq!(["hello", "world"].connect(" "), "hello world");
+ /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.3.0", note = "renamed to join")]
+ pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
+ where
+ Self: Join<Separator>,
+ {
+ Join::join(self, sep)
+ }
+}
+
+#[cfg(not(test))]
+impl [u8] {
+ /// Returns a vector containing a copy of this slice where each byte
+ /// is mapped to its ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+ /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the uppercase bytes as a new Vec, \
+ without modifying the original"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> Vec<u8> {
+ let mut me = self.to_vec();
+ me.make_ascii_uppercase();
+ me
+ }
+
+ /// Returns a vector containing a copy of this slice where each byte
+ /// is mapped to its ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+ /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the lowercase bytes as a new Vec, \
+ without modifying the original"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> Vec<u8> {
+ let mut me = self.to_vec();
+ me.make_ascii_lowercase();
+ me
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Extension traits for slices over specific kinds of data
+////////////////////////////////////////////////////////////////////////////////
+
+/// Helper trait for [`[T]::concat`](slice::concat).
+///
+/// Note: the `Item` type parameter is not used in this trait,
+/// but it allows impls to be more generic.
+/// Without it, we get this error:
+///
+/// ```error
+/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predicates
+/// --> src/liballoc/slice.rs:608:6
+/// |
+/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
+/// | ^ unconstrained type parameter
+/// ```
+///
+/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
+/// such that multiple `T` types would apply:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// pub struct Foo(Vec<u32>, Vec<String>);
+///
+/// impl std::borrow::Borrow<[u32]> for Foo {
+/// fn borrow(&self) -> &[u32] { &self.0 }
+/// }
+///
+/// impl std::borrow::Borrow<[String]> for Foo {
+/// fn borrow(&self) -> &[String] { &self.1 }
+/// }
+/// ```
+#[unstable(feature = "slice_concat_trait", issue = "27747")]
+pub trait Concat<Item: ?Sized> {
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ /// The resulting type after concatenation
+ type Output;
+
+ /// Implementation of [`[T]::concat`](slice::concat)
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ fn concat(slice: &Self) -> Self::Output;
+}
+
+/// Helper trait for [`[T]::join`](slice::join)
+#[unstable(feature = "slice_concat_trait", issue = "27747")]
+pub trait Join<Separator> {
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ /// The resulting type after concatenation
+ type Output;
+
+ /// Implementation of [`[T]::join`](slice::join)
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ fn join(slice: &Self, sep: Separator) -> Self::Output;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
+ type Output = Vec<T>;
+
+ fn concat(slice: &Self) -> Vec<T> {
+ let size = slice.iter().map(|slice| slice.borrow().len()).sum();
+ let mut result = Vec::with_capacity(size);
+ for v in slice {
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
+ type Output = Vec<T>;
+
+ fn join(slice: &Self, sep: &T) -> Vec<T> {
+ let mut iter = slice.iter();
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+ let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
+ let mut result = Vec::with_capacity(size);
+ result.extend_from_slice(first.borrow());
+
+ for v in iter {
+ result.push(sep.clone());
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
+ type Output = Vec<T>;
+
+ fn join(slice: &Self, sep: &[T]) -> Vec<T> {
+ let mut iter = slice.iter();
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+ let size =
+ slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
+ let mut result = Vec::with_capacity(size);
+ result.extend_from_slice(first.borrow());
+
+ for v in iter {
+ result.extend_from_slice(sep);
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Standard trait implementations for slices
+////////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Borrow<[T]> for Vec<T> {
+ fn borrow(&self) -> &[T] {
+ &self[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> BorrowMut<[T]> for Vec<T> {
+ fn borrow_mut(&mut self) -> &mut [T] {
+ &mut self[..]
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> ToOwned for [T] {
+ type Owned = Vec<T>;
+ #[cfg(not(test))]
+ fn to_owned(&self) -> Vec<T> {
+ self.to_vec()
+ }
+
+ #[cfg(test)]
+ fn to_owned(&self) -> Vec<T> {
+ hack::to_vec(self, Global)
+ }
+
+ fn clone_into(&self, target: &mut Vec<T>) {
+ // drop anything in target that will not be overwritten
+ target.truncate(self.len());
+
+ // target.len <= self.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = self.split_at(target.len());
+
+ // reuse the contained values' allocations/resources.
+ target.clone_from_slice(init);
+ target.extend_from_slice(tail);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Sorting
+////////////////////////////////////////////////////////////////////////////////
+
+/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
+///
+/// This is the integral subroutine of insertion sort.
+#[cfg(not(no_global_oom_handling))]
+fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ if v.len() >= 2 && is_less(&v[1], &v[0]) {
+ unsafe {
+ // There are three ways to implement insertion here:
+ //
+ // 1. Swap adjacent elements until the first one gets to its final destination.
+ // However, this way we copy data around more than is necessary. If elements are big
+ // structures (costly to copy), this method will be slow.
+ //
+ // 2. Iterate until the right place for the first element is found. Then shift the
+ // elements succeeding it to make room for it and finally place it into the
+ // remaining hole. This is a good method.
+ //
+ // 3. Copy the first element into a temporary variable. Iterate until the right place
+ // for it is found. As we go along, copy every traversed element into the slot
+ // preceding it. Finally, copy data from the temporary variable into the remaining
+ // hole. This method is very good. Benchmarks demonstrated slightly better
+ // performance than with the 2nd method.
+ //
+ // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
+ let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
+
+ // Intermediate state of the insertion process is always tracked by `hole`, which
+ // serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` in the end.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
+ ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
+
+ for i in 2..v.len() {
+ if !is_less(&v[i], &*tmp) {
+ break;
+ }
+ ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
+ hole.dest = &mut v[i];
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+
+ // When dropped, copies from `src` into `dest`.
+ struct InsertionHole<T> {
+ src: *const T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for InsertionHole<T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, 1);
+ }
+ }
+ }
+}
+
+/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
+/// stores the result into `v[..]`.
+///
+/// # Safety
+///
+/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
+/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
+#[cfg(not(no_global_oom_handling))]
+unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+ let v = v.as_mut_ptr();
+ let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
+
+ // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
+ // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
+ // copying the lesser (or greater) one into `v`.
+ //
+ // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
+ // consumed first, then we must copy whatever is left of the shorter run into the remaining
+ // hole in `v`.
+ //
+ // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
+ // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
+ // object it initially held exactly once.
+ let mut hole;
+
+ if mid <= len - mid {
+ // The left run is shorter.
+ unsafe {
+ ptr::copy_nonoverlapping(v, buf, mid);
+ hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
+ }
+
+ // Initially, these pointers point to the beginnings of their arrays.
+ let left = &mut hole.start;
+ let mut right = v_mid;
+ let out = &mut hole.dest;
+
+ while *left < hole.end && right < v_end {
+ // Consume the lesser side.
+ // If equal, prefer the left run to maintain stability.
+ unsafe {
+ let to_copy = if is_less(&*right, &**left) {
+ get_and_increment(&mut right)
+ } else {
+ get_and_increment(left)
+ };
+ ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+ }
+ }
+ } else {
+ // The right run is shorter.
+ unsafe {
+ ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+ hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
+ }
+
+ // Initially, these pointers point past the ends of their arrays.
+ let left = &mut hole.dest;
+ let right = &mut hole.end;
+ let mut out = v_end;
+
+ while v < *left && buf < *right {
+ // Consume the greater side.
+ // If equal, prefer the right run to maintain stability.
+ unsafe {
+ let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+ decrement_and_get(left)
+ } else {
+ decrement_and_get(right)
+ };
+ ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+ }
+ }
+ }
+ // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
+ // it will now be copied into the hole in `v`.
+
+ unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
+ let old = *ptr;
+ *ptr = unsafe { ptr.offset(1) };
+ old
+ }
+
+ unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
+ *ptr = unsafe { ptr.offset(-1) };
+ *ptr
+ }
+
+ // When dropped, copies the range `start..end` into `dest..`.
+ struct MergeHole<T> {
+ start: *mut T,
+ end: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for MergeHole<T> {
+ fn drop(&mut self) {
+ // `T` is not a zero-sized type, and these are pointers into a slice's elements.
+ unsafe {
+ let len = self.end.sub_ptr(self.start);
+ ptr::copy_nonoverlapping(self.start, self.dest, len);
+ }
+ }
+ }
+}
+
+/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
+/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
+///
+/// The algorithm identifies strictly descending and non-descending subsequences, which are called
+/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
+/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
+/// satisfied:
+///
+/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
+/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
+///
+/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
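+///
+/// For example, a stack of run lengths `[48, 24, 12]` satisfies both invariants,
+/// whereas `[20, 12, 10]` violates the second one (`20 > 12 + 10` does not hold)
+/// and therefore triggers a merge.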
+#[cfg(not(no_global_oom_handling))]
+fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Slices of up to this length get sorted using insertion sort.
+ const MAX_INSERTION: usize = 20;
+ // Very short runs are extended using insertion sort to span at least this many elements.
+ const MIN_RUN: usize = 10;
+
+ // Sorting has no meaningful behavior on zero-sized types.
+ if size_of::<T>() == 0 {
+ return;
+ }
+
+ let len = v.len();
+
+ // Short arrays get sorted in-place via insertion sort to avoid allocations.
+ if len <= MAX_INSERTION {
+ if len >= 2 {
+ for i in (0..len - 1).rev() {
+ insert_head(&mut v[i..], &mut is_less);
+ }
+ }
+ return;
+ }
+
+ // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
+ // shallow copies of the contents of `v` without risking the dtors running on copies if
+ // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
+ // which will always have length at most `len / 2`.
+ let mut buf = Vec::with_capacity(len / 2);
+
+ // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
+ // strange decision, but consider the fact that merges more often go in the opposite direction
+ // (forwards). According to benchmarks, merging forwards is slightly faster than merging
+ // backwards. To conclude, identifying runs by traversing backwards improves performance.
+ let mut runs = vec![];
+ let mut end = len;
+ while end > 0 {
+ // Find the next natural run, and reverse it if it's strictly descending.
+ let mut start = end - 1;
+ if start > 0 {
+ start -= 1;
+ unsafe {
+ if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
+ while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+ start -= 1;
+ }
+ v[start..end].reverse();
+ } else {
+ while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
+ {
+ start -= 1;
+ }
+ }
+ }
+ }
+
+ // Insert some more elements into the run if it's too short. Insertion sort is faster than
+ // merge sort on short sequences, so this significantly improves performance.
+ while start > 0 && end - start < MIN_RUN {
+ start -= 1;
+ insert_head(&mut v[start..end], &mut is_less);
+ }
+
+ // Push this run onto the stack.
+ runs.push(Run { start, len: end - start });
+ end = start;
+
+ // Merge some pairs of adjacent runs to satisfy the invariants.
+ while let Some(r) = collapse(&runs) {
+ let left = runs[r + 1];
+ let right = runs[r];
+ unsafe {
+ merge(
+ &mut v[left.start..right.start + right.len],
+ left.len,
+ buf.as_mut_ptr(),
+ &mut is_less,
+ );
+ }
+ runs[r] = Run { start: left.start, len: left.len + right.len };
+ runs.remove(r + 1);
+ }
+ }
+
+ // Finally, exactly one run must remain in the stack.
+ debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+
+ // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
+ // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+ // algorithm should continue building a new run instead, `None` is returned.
+ //
+ // TimSort is infamous for its buggy implementations, as described here:
+ // http://envisage-project.eu/timsort-specification-and-verification/
+ //
+ // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+ // Enforcing them on just top three is not sufficient to ensure that the invariants will still
+ // hold for *all* runs in the stack.
+ //
+ // This function correctly checks invariants for the top four runs. Additionally, if the top
+ // run starts at index 0, it will always demand a merge operation until the stack is fully
+ // collapsed, in order to complete the sort.
+ #[inline]
+ fn collapse(runs: &[Run]) -> Option<usize> {
+ let n = runs.len();
+ if n >= 2
+ && (runs[n - 1].start == 0
+ || runs[n - 2].len <= runs[n - 1].len
+ || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
+ || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
+ {
+ if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
+ } else {
+ None
+ }
+ }
+
+ #[derive(Clone, Copy)]
+ struct Run {
+ start: usize,
+ len: usize,
+ }
+}
diff --git a/rust/alloc/str.rs b/rust/alloc/str.rs
new file mode 100644
index 000000000000..4e3aec690fdb
--- /dev/null
+++ b/rust/alloc/str.rs
@@ -0,0 +1,641 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! Unicode string slices.
+//!
+//! *[See also the `str` primitive type](str).*
+//!
+//! The `&str` type is one of the two main string types, the other being `String`.
+//! Unlike its `String` counterpart, its contents are borrowed.
+//!
+//! # Basic Usage
+//!
+//! A basic string declaration of `&str` type:
+//!
+//! ```
+//! let hello_world = "Hello, World!";
+//! ```
+//!
+//! Here we have declared a string literal, also known as a string slice.
+//! String literals have a static lifetime, which means the string `hello_world`
+//! is guaranteed to be valid for the duration of the entire program.
+//! We can explicitly specify `hello_world`'s lifetime as well:
+//!
+//! ```
+//! let hello_world: &'static str = "Hello, world!";
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+// Many of the usings in this module are only used in the test configuration.
+// It's cleaner to just turn off the unused_imports warning than to fix them.
+#![allow(unused_imports)]
+
+use core::borrow::{Borrow, BorrowMut};
+use core::iter::FusedIterator;
+use core::mem;
+use core::ptr;
+use core::str::pattern::{DoubleEndedSearcher, Pattern, ReverseSearcher, Searcher};
+use core::unicode::conversions;
+
+use crate::borrow::ToOwned;
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::slice::{Concat, Join, SliceIndex};
+use crate::string::String;
+use crate::vec::Vec;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::pattern;
+#[stable(feature = "encode_utf16", since = "1.8.0")]
+pub use core::str::EncodeUtf16;
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+pub use core::str::SplitAsciiWhitespace;
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+pub use core::str::SplitInclusive;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::SplitWhitespace;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{from_utf8, from_utf8_mut, Bytes, CharIndices, Chars};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError};
+#[stable(feature = "str_escape", since = "1.34.0")]
+pub use core::str::{EscapeDebug, EscapeDefault, EscapeUnicode};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{FromStr, Utf8Error};
+#[allow(deprecated)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{Lines, LinesAny};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{MatchIndices, RMatchIndices};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{Matches, RMatches};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplit, Split};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplitN, SplitN};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplitTerminator, SplitTerminator};
+
+/// Note: `str` in `Concat<str>` is not meaningful here.
+/// This type parameter of the trait only exists to enable another impl.
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<str>> Concat<str> for [S] {
+ type Output = String;
+
+ fn concat(slice: &Self) -> String {
+ Join::join(slice, "")
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<str>> Join<&str> for [S] {
+ type Output = String;
+
+ fn join(slice: &Self, sep: &str) -> String {
+ unsafe { String::from_utf8_unchecked(join_generic_copy(slice, sep.as_bytes())) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+macro_rules! specialize_for_lengths {
+ ($separator:expr, $target:expr, $iter:expr; $($num:expr),*) => {{
+ let mut target = $target;
+ let iter = $iter;
+ let sep_bytes = $separator;
+ match $separator.len() {
+ $(
+ // loops with hardcoded sizes run much faster
+ // specialize the cases with small separator lengths
+ $num => {
+ for s in iter {
+ copy_slice_and_advance!(target, sep_bytes);
+ let content_bytes = s.borrow().as_ref();
+ copy_slice_and_advance!(target, content_bytes);
+ }
+ },
+ )*
+ _ => {
+ // arbitrary non-zero size fallback
+ for s in iter {
+ copy_slice_and_advance!(target, sep_bytes);
+ let content_bytes = s.borrow().as_ref();
+ copy_slice_and_advance!(target, content_bytes);
+ }
+ }
+ }
+ target
+ }}
+}
+
+#[cfg(not(no_global_oom_handling))]
+macro_rules! copy_slice_and_advance {
+ ($target:expr, $bytes:expr) => {
+ let len = $bytes.len();
+ let (head, tail) = { $target }.split_at_mut(len);
+ head.copy_from_slice($bytes);
+ $target = tail;
+ };
+}
+
+// Optimized join implementation that works for both Vec<T> (T: Copy) and String's inner vec
+// Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262)
+// For this reason SliceConcat<T> is not specialized for T: Copy and SliceConcat<str> is the
+// only user of this function. It is left in place for the time when that is fixed.
+//
+// the bounds for String-join are S: Borrow<str> and for Vec-join Borrow<[T]>
+// [T] and str both impl AsRef<[T]> for some T
+// => s.borrow().as_ref() and we always have slices
+#[cfg(not(no_global_oom_handling))]
+fn join_generic_copy<B, T, S>(slice: &[S], sep: &[T]) -> Vec<T>
+where
+ T: Copy,
+ B: AsRef<[T]> + ?Sized,
+ S: Borrow<B>,
+{
+ let sep_len = sep.len();
+ let mut iter = slice.iter();
+
+ // the first slice is the only one without a separator preceding it
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+
+ // compute the exact total length of the joined Vec
+ // if the `len` calculation overflows, we'll panic
+ // we would have run out of memory anyway and the rest of the function requires
+ // the entire Vec pre-allocated for safety
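+ // (note that `iter.next()` above already consumed the first slice, so
+ // `iter.len()` below is `slice.len() - 1`, i.e. the number of separators)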
+ let reserved_len = sep_len
+ .checked_mul(iter.len())
+ .and_then(|n| {
+ slice.iter().map(|s| s.borrow().as_ref().len()).try_fold(n, usize::checked_add)
+ })
+ .expect("attempt to join into collection with len > usize::MAX");
+
+ // prepare an uninitialized buffer
+ let mut result = Vec::with_capacity(reserved_len);
+ debug_assert!(result.capacity() >= reserved_len);
+
+ result.extend_from_slice(first.borrow().as_ref());
+
+ unsafe {
+ let pos = result.len();
+ let target = result.spare_capacity_mut().get_unchecked_mut(..reserved_len - pos);
+
+ // Convert the separator and slices to slices of MaybeUninit
+ // to simplify implementation in specialize_for_lengths
+ let sep_uninit = core::slice::from_raw_parts(sep.as_ptr().cast(), sep.len());
+ let iter_uninit = iter.map(|it| {
+ let it = it.borrow().as_ref();
+ core::slice::from_raw_parts(it.as_ptr().cast(), it.len())
+ });
+
+ // copy separator and slices over without bounds checks
+ // generate loops with hardcoded offsets for small separators
+ // massive improvements possible (~ x2)
+ let remain = specialize_for_lengths!(sep_uninit, target, iter_uninit; 0, 1, 2, 3, 4);
+
+ // A weird borrow implementation may return different
+ // slices for the length calculation and the actual copy.
+ // Make sure we don't expose uninitialized bytes to the caller.
+ let result_len = reserved_len - remain.len();
+ result.set_len(result_len);
+ }
+ result
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Borrow<str> for String {
+ #[inline]
+ fn borrow(&self) -> &str {
+ &self[..]
+ }
+}
+
+#[stable(feature = "string_borrow_mut", since = "1.36.0")]
+impl BorrowMut<str> for String {
+ #[inline]
+ fn borrow_mut(&mut self) -> &mut str {
+ &mut self[..]
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToOwned for str {
+ type Owned = String;
+ #[inline]
+ fn to_owned(&self) -> String {
+ unsafe { String::from_utf8_unchecked(self.as_bytes().to_owned()) }
+ }
+
+ fn clone_into(&self, target: &mut String) {
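+ // Reuse `target`'s existing allocation: take its byte buffer, clone the
+ // bytes of `self` into it, and turn it back into a `String`.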
+ let mut b = mem::take(target).into_bytes();
+ self.as_bytes().clone_into(&mut b);
+ *target = unsafe { String::from_utf8_unchecked(b) }
+ }
+}
+
+/// Methods for string slices.
+#[cfg(not(test))]
+impl str {
+ /// Converts a `Box<str>` into a `Box<[u8]>` without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "this is a string";
+ /// let boxed_str = s.to_owned().into_boxed_str();
+ /// let boxed_bytes = boxed_str.into_boxed_bytes();
+ /// assert_eq!(*boxed_bytes, *s.as_bytes());
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "str_box_extras", since = "1.20.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> {
+ self.into()
+ }
+
+ /// Replaces all matches of a pattern with another string.
+ ///
+ /// `replace` creates a new [`String`], and copies the data from this string slice into it.
+ /// While doing so, it attempts to find matches of a pattern. If it finds any, it
+ /// replaces them with the replacement string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "this is old";
+ ///
+ /// assert_eq!("this is new", s.replace("old", "new"));
+ /// ```
+ ///
+ /// When the pattern doesn't match:
+ ///
+ /// ```
+ /// let s = "this is old";
+ /// assert_eq!(s, s.replace("cookie monster", "little lamb"));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the replaced string as a new allocation, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn replace<'a, P: Pattern<'a>>(&'a self, from: P, to: &str) -> String {
+ let mut result = String::new();
+ let mut last_end = 0;
+ for (start, part) in self.match_indices(from) {
+ result.push_str(unsafe { self.get_unchecked(last_end..start) });
+ result.push_str(to);
+ last_end = start + part.len();
+ }
+ result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
+ result
+ }
+
+ /// Replaces the first N matches of a pattern with another string.
+ ///
+ /// `replacen` creates a new [`String`], and copies the data from this string slice into it.
+ /// While doing so, it attempts to find matches of a pattern. If it finds any, it
+ /// replaces them with the replacement string slice at most `count` times.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "foo foo 123 foo";
+ /// assert_eq!("new new 123 foo", s.replacen("foo", "new", 2));
+ /// assert_eq!("faa fao 123 foo", s.replacen('o', "a", 3));
+ /// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
+ /// ```
+ ///
+ /// When the pattern doesn't match:
+ ///
+ /// ```
+ /// let s = "this is old";
+ /// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the replaced string as a new allocation, \
+ without modifying the original"]
+ #[stable(feature = "str_replacen", since = "1.16.0")]
+ pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String {
+ // Start with some spare capacity in the hope of reducing the number of re-allocations.
+ let mut result = String::with_capacity(32);
+ let mut last_end = 0;
+ for (start, part) in self.match_indices(pat).take(count) {
+ result.push_str(unsafe { self.get_unchecked(last_end..start) });
+ result.push_str(to);
+ last_end = start + part.len();
+ }
+ result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
+ result
+ }
+
+ /// Returns the lowercase equivalent of this string slice, as a new [`String`].
+ ///
+ /// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property
+ /// `Lowercase`.
+ ///
+ /// Since some characters can expand into multiple characters when changing
+ /// the case, this function returns a [`String`] instead of modifying the
+ /// parameter in-place.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "HELLO";
+ ///
+ /// assert_eq!("hello", s.to_lowercase());
+ /// ```
+ ///
+ /// A tricky example, with sigma:
+ ///
+ /// ```
+ /// let sigma = "Σ";
+ ///
+ /// assert_eq!("σ", sigma.to_lowercase());
+ ///
+ /// // but at the end of a word, it's ς, not σ:
+ /// let odysseus = "ὈΔΥΣΣΕΎΣ";
+ ///
+ /// assert_eq!("ὀδυσσεύς", odysseus.to_lowercase());
+ /// ```
+ ///
+ /// Languages without case are not changed:
+ ///
+ /// ```
+ /// let new_year = "农历新年";
+ ///
+ /// assert_eq!(new_year, new_year.to_lowercase());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the lowercase string as a new String, \
+ without modifying the original"]
+ #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
+ pub fn to_lowercase(&self) -> String {
+ let mut s = String::with_capacity(self.len());
+ for (i, c) in self[..].char_indices() {
+ if c == 'Σ' {
+ // Σ maps to σ, except at the end of a word where it maps to ς.
+ // This is the only conditional (contextual) but language-independent mapping
+ // in `SpecialCasing.txt`,
+ // so hard-code it rather than have a generic "condition" mechanism.
+ // See https://github.com/rust-lang/rust/issues/26035
+ map_uppercase_sigma(self, i, &mut s)
+ } else {
+ match conversions::to_lower(c) {
+ [a, '\0', _] => s.push(a),
+ [a, b, '\0'] => {
+ s.push(a);
+ s.push(b);
+ }
+ [a, b, c] => {
+ s.push(a);
+ s.push(b);
+ s.push(c);
+ }
+ }
+ }
+ }
+ return s;
+
+ fn map_uppercase_sigma(from: &str, i: usize, to: &mut String) {
+ // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992
+ // for the definition of `Final_Sigma`.
+ debug_assert!('Σ'.len_utf8() == 2);
+ let is_word_final = case_ignorable_then_cased(from[..i].chars().rev())
+ && !case_ignorable_then_cased(from[i + 2..].chars());
+ to.push_str(if is_word_final { "ς" } else { "σ" });
+ }
+
+ fn case_ignorable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
+ use core::unicode::{Case_Ignorable, Cased};
+ match iter.skip_while(|&c| Case_Ignorable(c)).next() {
+ Some(c) => Cased(c),
+ None => false,
+ }
+ }
+ }
+
+ /// Returns the uppercase equivalent of this string slice, as a new [`String`].
+ ///
+ /// 'Uppercase' is defined according to the terms of the Unicode Derived Core Property
+ /// `Uppercase`.
+ ///
+ /// Since some characters can expand into multiple characters when changing
+ /// the case, this function returns a [`String`] instead of modifying the
+ /// parameter in-place.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "hello";
+ ///
+ /// assert_eq!("HELLO", s.to_uppercase());
+ /// ```
+ ///
+ /// Scripts without case are not changed:
+ ///
+ /// ```
+ /// let new_year = "农历新年";
+ ///
+ /// assert_eq!(new_year, new_year.to_uppercase());
+ /// ```
+ ///
+ /// One character can become multiple:
+ /// ```
+ /// let s = "tschüß";
+ ///
+ /// assert_eq!("TSCHÜSS", s.to_uppercase());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the uppercase string as a new String, \
+ without modifying the original"]
+ #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
+ pub fn to_uppercase(&self) -> String {
+ let mut s = String::with_capacity(self.len());
+ for c in self[..].chars() {
+ match conversions::to_upper(c) {
+ [a, '\0', _] => s.push(a),
+ [a, b, '\0'] => {
+ s.push(a);
+ s.push(b);
+ }
+ [a, b, c] => {
+ s.push(a);
+ s.push(b);
+ s.push(c);
+ }
+ }
+ }
+ s
+ }
+
+ /// Converts a [`Box<str>`] into a [`String`] without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let string = String::from("birthday gift");
+ /// let boxed_str = string.clone().into_boxed_str();
+ ///
+ /// assert_eq!(boxed_str.into_string(), string);
+ /// ```
+ #[stable(feature = "box_str", since = "1.4.0")]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_string(self: Box<str>) -> String {
+ let slice = Box::<[u8]>::from(self);
+ unsafe { String::from_utf8_unchecked(slice.into_vec()) }
+ }
+
+ /// Creates a new [`String`] by repeating a string `n` times.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the capacity would overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!("abc".repeat(4), String::from("abcabcabcabc"));
+ /// ```
+ ///
+ /// A panic upon overflow:
+ ///
+ /// ```should_panic
+ /// // this will panic at runtime
+ /// let huge = "0123456789abcdef".repeat(usize::MAX);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use]
+ #[stable(feature = "repeat_str", since = "1.16.0")]
+ pub fn repeat(&self, n: usize) -> String {
+ unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) }
+ }
+
+ /// Returns a copy of this string where each character is mapped to its
+ /// ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+ /// To uppercase ASCII characters in addition to non-ASCII characters, use
+ /// [`to_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = "Grüße, Jürgen ❤";
+ ///
+ /// assert_eq!("GRüßE, JüRGEN ❤", s.to_ascii_uppercase());
+ /// ```
+ ///
+ /// [`make_ascii_uppercase`]: str::make_ascii_uppercase
+ /// [`to_uppercase`]: #method.to_uppercase
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "to uppercase the value in-place, use `make_ascii_uppercase()`"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> String {
+ let mut bytes = self.as_bytes().to_vec();
+ bytes.make_ascii_uppercase();
+ // make_ascii_uppercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(bytes) }
+ }
+
+ /// Returns a copy of this string where each character is mapped to its
+ /// ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+ /// To lowercase ASCII characters in addition to non-ASCII characters, use
+ /// [`to_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = "Grüße, Jürgen ❤";
+ ///
+ /// assert_eq!("grüße, jürgen ❤", s.to_ascii_lowercase());
+ /// ```
+ ///
+ /// [`make_ascii_lowercase`]: str::make_ascii_lowercase
+ /// [`to_lowercase`]: #method.to_lowercase
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "to lowercase the value in-place, use `make_ascii_lowercase()`"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> String {
+ let mut bytes = self.as_bytes().to_vec();
+ bytes.make_ascii_lowercase();
+ // make_ascii_lowercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(bytes) }
+ }
+
+ /// Tries to create an owned `String` from this string slice, returning an
+ /// error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "a";
+ /// let ss: String = s.try_to_owned().unwrap();
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_to_owned(&self) -> Result<String, TryReserveError> {
+ unsafe { Ok(String::from_utf8_unchecked(self.as_bytes().try_to_vec()?)) }
+ }
+}
+
+/// Converts a boxed slice of bytes to a boxed string slice without checking
+/// that the string contains valid UTF-8.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let smile_utf8 = Box::new([226, 152, 186]);
+/// let smile = unsafe { std::str::from_boxed_utf8_unchecked(smile_utf8) };
+///
+/// assert_eq!("☺", &*smile);
+/// ```
+#[stable(feature = "str_box_extras", since = "1.20.0")]
+#[must_use]
+#[inline]
+pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
+ unsafe { Box::from_raw(Box::into_raw(v) as *mut str) }
+}
diff --git a/rust/alloc/string.rs b/rust/alloc/string.rs
new file mode 100644
index 000000000000..2ba7f30a7503
--- /dev/null
+++ b/rust/alloc/string.rs
@@ -0,0 +1,2944 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A UTF-8–encoded, growable string.
+//!
+//! This module contains the [`String`] type, the [`ToString`] trait for
+//! converting to strings, and several error types that may result from
+//! working with [`String`]s.
+//!
+//! # Examples
+//!
+//! There are multiple ways to create a new [`String`] from a string literal:
+//!
+//! ```
+//! let s = "Hello".to_string();
+//!
+//! let s = String::from("world");
+//! let s: String = "also this".into();
+//! ```
+//!
+//! You can create a new [`String`] from an existing one by concatenating with
+//! `+`:
+//!
+//! ```
+//! let s = "Hello".to_string();
+//!
+//! let message = s + " world!";
+//! ```
+//!
+//! If you have a vector of valid UTF-8 bytes, you can make a [`String`] out of
+//! it. You can do the reverse too.
+//!
+//! ```
+//! let sparkle_heart = vec![240, 159, 146, 150];
+//!
+//! // We know these bytes are valid, so we'll use `unwrap()`.
+//! let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+//!
+//! assert_eq!("💖", sparkle_heart);
+//!
+//! let bytes = sparkle_heart.into_bytes();
+//!
+//! assert_eq!(bytes, [240, 159, 146, 150]);
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+use core::char::{decode_utf16, REPLACEMENT_CHARACTER};
+use core::fmt;
+use core::hash;
+#[cfg(not(no_global_oom_handling))]
+use core::iter::{from_fn, FromIterator};
+use core::iter::FusedIterator;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::Add;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::AddAssign;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::Bound::{Excluded, Included, Unbounded};
+use core::ops::{self, Index, IndexMut, Range, RangeBounds};
+use core::ptr;
+use core::slice;
+#[cfg(not(no_global_oom_handling))]
+use core::str::lossy;
+use core::str::pattern::Pattern;
+
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::str::{self, Chars, Utf8Error};
+#[cfg(not(no_global_oom_handling))]
+use crate::str::{from_boxed_utf8_unchecked, FromStr};
+use crate::vec::Vec;
+
+/// A UTF-8–encoded, growable string.
+///
+/// The `String` type is the most common string type that has ownership over the
+/// contents of the string. It has a close relationship with its borrowed
+/// counterpart, the primitive [`str`].
+///
+/// # Examples
+///
+/// You can create a `String` from [a literal string][`&str`] with [`String::from`]:
+///
+/// [`String::from`]: From::from
+///
+/// ```
+/// let hello = String::from("Hello, world!");
+/// ```
+///
+/// You can append a [`char`] to a `String` with the [`push`] method, and
+/// append a [`&str`] with the [`push_str`] method:
+///
+/// ```
+/// let mut hello = String::from("Hello, ");
+///
+/// hello.push('w');
+/// hello.push_str("orld!");
+/// ```
+///
+/// [`push`]: String::push
+/// [`push_str`]: String::push_str
+///
+/// If you have a vector of UTF-8 bytes, you can create a `String` from it with
+/// the [`from_utf8`] method:
+///
+/// ```
+/// // some bytes, in a vector
+/// let sparkle_heart = vec![240, 159, 146, 150];
+///
+/// // We know these bytes are valid, so we'll use `unwrap()`.
+/// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+///
+/// assert_eq!("💖", sparkle_heart);
+/// ```
+///
+/// [`from_utf8`]: String::from_utf8
+///
+/// # UTF-8
+///
+/// `String`s are always valid UTF-8. If you need a non-UTF-8 string, consider
+/// [`OsString`]. It is similar, but without the UTF-8 constraint. Because UTF-8
+/// is a variable width encoding, `String`s are typically smaller than an array of
+/// the same `chars`:
+///
+/// ```
+/// use std::mem;
+///
+/// // `s` is ASCII which represents each `char` as one byte
+/// let s = "hello";
+/// assert_eq!(s.len(), 5);
+///
+/// // A `char` array with the same contents would be longer because
+/// // every `char` is four bytes
+/// let s = ['h', 'e', 'l', 'l', 'o'];
+/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
+/// assert_eq!(size, 20);
+///
+/// // However, for non-ASCII strings, the difference will be smaller
+/// // and sometimes they are the same
+/// let s = "💖💖💖💖💖";
+/// assert_eq!(s.len(), 20);
+///
+/// let s = ['💖', '💖', '💖', '💖', '💖'];
+/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
+/// assert_eq!(size, 20);
+/// ```
+///
+/// This raises interesting questions as to how `s[i]` should work.
+/// What should `i` be here? Several options include byte indices and
+/// `char` indices but, because of UTF-8 encoding, only byte indices
+/// would provide constant time indexing. Getting the `i`th `char`, for
+/// example, is available using [`chars`]:
+///
+/// ```
+/// let s = "hello";
+/// let third_character = s.chars().nth(2);
+/// assert_eq!(third_character, Some('l'));
+///
+/// let s = "💖💖💖💖💖";
+/// let third_character = s.chars().nth(2);
+/// assert_eq!(third_character, Some('💖'));
+/// ```
+///
+/// Next, what should `s[i]` return? Because indexing returns a reference
+/// to underlying data it could be `&u8`, `&[u8]`, or something else similar.
+/// Since we're only providing one index, `&u8` makes the most sense but that
+/// might not be what the user expects and can be explicitly achieved with
+/// [`as_bytes()`]:
+///
+/// ```
+/// // The first byte is 104 - the byte value of `'h'`
+/// let s = "hello";
+/// assert_eq!(s.as_bytes()[0], 104);
+/// // or
+/// assert_eq!(s.as_bytes()[0], b'h');
+///
+/// // The first byte is 240 which isn't obviously useful
+/// let s = "💖💖💖💖💖";
+/// assert_eq!(s.as_bytes()[0], 240);
+/// ```
+///
+/// Due to these ambiguities/restrictions, indexing with a `usize` is simply
+/// forbidden:
+///
+/// ```compile_fail,E0277
+/// let s = "hello";
+///
+/// // The following will not compile!
+/// println!("The first letter of s is {}", s[0]);
+/// ```
+///
+/// It is clearer, however, how `&s[i..j]` should work (that is,
+/// indexing with a range). It should accept byte indices (to be constant-time)
+/// and return a `&str` which is UTF-8 encoded. This is also called "string slicing".
+/// Note this will panic if the byte indices provided are not character
+/// boundaries - see [`is_char_boundary`] for more details. See the implementations
+/// for [`SliceIndex<str>`] for more details on string slicing. For a non-panicking
+/// version of string slicing, see [`get`].
+///
+/// [`OsString`]: ../../std/ffi/struct.OsString.html "ffi::OsString"
+/// [`SliceIndex<str>`]: core::slice::SliceIndex
+/// [`as_bytes()`]: str::as_bytes
+/// [`get`]: str::get
+/// [`is_char_boundary`]: str::is_char_boundary
+///
+/// The [`bytes`] and [`chars`] methods return iterators over the bytes and
+/// codepoints of the string, respectively. To iterate over codepoints along
+/// with byte indices, use [`char_indices`].
+///
+/// [`bytes`]: str::bytes
+/// [`chars`]: str::chars
+/// [`char_indices`]: str::char_indices
+///
+/// # Deref
+///
+/// `String` implements <code>[Deref]<Target = [str]></code>, and so inherits all of [`str`]'s
+/// methods. In addition, this means that you can pass a `String` to a
+/// function which takes a [`&str`] by using an ampersand (`&`):
+///
+/// ```
+/// fn takes_str(s: &str) { }
+///
+/// let s = String::from("Hello");
+///
+/// takes_str(&s);
+/// ```
+///
+/// This will create a [`&str`] from the `String` and pass it in. This
+/// conversion is very inexpensive, and so generally, functions will accept
+/// [`&str`]s as arguments unless they need a `String` for some specific
+/// reason.
+///
+/// In certain cases Rust doesn't have enough information to make this
+/// conversion, known as [`Deref`] coercion. In the following example a string
+/// slice [`&'a str`][`&str`] implements the trait `TraitExample`, and the function
+/// `example_func` takes anything that implements the trait. In this case Rust
+/// would need to make two implicit conversions, which Rust doesn't have the
+/// means to do. For that reason, the following example will not compile.
+///
+/// ```compile_fail,E0277
+/// trait TraitExample {}
+///
+/// impl<'a> TraitExample for &'a str {}
+///
+/// fn example_func<A: TraitExample>(example_arg: A) {}
+///
+/// let example_string = String::from("example_string");
+/// example_func(&example_string);
+/// ```
+///
+/// There are two options that would work instead. The first would be to
+/// change the line `example_func(&example_string);` to
+/// `example_func(example_string.as_str());`, using the method [`as_str()`]
+/// to explicitly extract the string slice containing the string. The second
+/// way changes `example_func(&example_string);` to
+/// `example_func(&*example_string);`. In this case we are dereferencing a
+/// `String` to a [`str`], then referencing the [`str`] back to
+/// [`&str`]. The second way is more idiomatic, however both work to do the
+/// conversion explicitly rather than relying on the implicit conversion.
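+///
+/// For example, building on the illustrative `TraitExample` items from the
+/// snippet above, both explicit conversions should compile:
+///
+/// ```
+/// trait TraitExample {}
+///
+/// impl<'a> TraitExample for &'a str {}
+///
+/// fn example_func<A: TraitExample>(example_arg: A) {}
+///
+/// let example_string = String::from("example_string");
+/// example_func(example_string.as_str());
+/// example_func(&*example_string);
+/// ```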
+///
+/// # Representation
+///
+/// A `String` is made up of three components: a pointer to some bytes, a
+/// length, and a capacity. The pointer points to an internal buffer `String`
+/// uses to store its data. The length is the number of bytes currently stored
+/// in the buffer, and the capacity is the size of the buffer in bytes. As such,
+/// the length will always be less than or equal to the capacity.
+///
+/// This buffer is always stored on the heap.
+///
+/// You can look at these with the [`as_ptr`], [`len`], and [`capacity`]
+/// methods:
+///
+/// ```
+/// use std::mem;
+///
+/// let story = String::from("Once upon a time...");
+///
+// FIXME Update this when vec_into_raw_parts is stabilized
+/// // Prevent automatically dropping the String's data
+/// let mut story = mem::ManuallyDrop::new(story);
+///
+/// let ptr = story.as_mut_ptr();
+/// let len = story.len();
+/// let capacity = story.capacity();
+///
+/// // story has nineteen bytes
+/// assert_eq!(19, len);
+///
+/// // We can re-build a String out of ptr, len, and capacity. This is all
+/// // unsafe because we are responsible for making sure the components are
+/// // valid:
+/// let s = unsafe { String::from_raw_parts(ptr, len, capacity) } ;
+///
+/// assert_eq!(String::from("Once upon a time..."), s);
+/// ```
+///
+/// [`as_ptr`]: str::as_ptr
+/// [`len`]: String::len
+/// [`capacity`]: String::capacity
+///
+/// If a `String` has enough capacity, adding elements to it will not
+/// re-allocate. For example, consider this program:
+///
+/// ```
+/// let mut s = String::new();
+///
+/// println!("{}", s.capacity());
+///
+/// for _ in 0..5 {
+/// s.push_str("hello");
+/// println!("{}", s.capacity());
+/// }
+/// ```
+///
+/// This will output the following:
+///
+/// ```text
+/// 0
+/// 5
+/// 10
+/// 20
+/// 20
+/// 40
+/// ```
+///
+/// At first, we have no memory allocated at all, but as we append to the
+/// string, it increases its capacity appropriately. If we instead use the
+/// [`with_capacity`] method to allocate the correct capacity initially:
+///
+/// ```
+/// let mut s = String::with_capacity(25);
+///
+/// println!("{}", s.capacity());
+///
+/// for _ in 0..5 {
+/// s.push_str("hello");
+/// println!("{}", s.capacity());
+/// }
+/// ```
+///
+/// [`with_capacity`]: String::with_capacity
+///
+/// We end up with a different output:
+///
+/// ```text
+/// 25
+/// 25
+/// 25
+/// 25
+/// 25
+/// 25
+/// ```
+///
+/// Here, there's no need to allocate more memory inside the loop.
+///
+/// [str]: prim@str "str"
+/// [`str`]: prim@str "str"
+/// [`&str`]: prim@str "&str"
+/// [Deref]: core::ops::Deref "ops::Deref"
+/// [`Deref`]: core::ops::Deref "ops::Deref"
+/// [`as_str()`]: String::as_str
+#[derive(PartialOrd, Eq, Ord)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "String")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct String {
+ vec: Vec<u8>,
+}
+
+/// A possible error value when converting a `String` from a UTF-8 byte vector.
+///
+/// This type is the error type for the [`from_utf8`] method on [`String`]. It
+/// is designed in such a way as to carefully avoid reallocations: the
+/// [`into_bytes`] method will give back the byte vector that was used in the
+/// conversion attempt.
+///
+/// [`from_utf8`]: String::from_utf8
+/// [`into_bytes`]: FromUtf8Error::into_bytes
+///
+/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
+/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
+/// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
+/// through the [`utf8_error`] method.
+///
+/// [`Utf8Error`]: str::Utf8Error "std::str::Utf8Error"
+/// [`std::str`]: core::str "std::str"
+/// [`&str`]: prim@str "&str"
+/// [`utf8_error`]: FromUtf8Error::utf8_error
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // some invalid bytes, in a vector
+/// let bytes = vec![0, 159];
+///
+/// let value = String::from_utf8(bytes);
+///
+/// assert!(value.is_err());
+/// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(no_global_oom_handling), derive(Clone))]
+#[derive(Debug, PartialEq, Eq)]
+pub struct FromUtf8Error {
+ bytes: Vec<u8>,
+ error: Utf8Error,
+}
+
+/// A possible error value when converting a `String` from a UTF-16 code unit slice.
+///
+/// This type is the error type for the [`from_utf16`] method on [`String`].
+///
+/// [`from_utf16`]: String::from_utf16
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // 𝄞mu<invalid>ic
+/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+/// 0xD800, 0x0069, 0x0063];
+///
+/// assert!(String::from_utf16(v).is_err());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct FromUtf16Error(());
+
+impl String {
+ /// Creates a new empty `String`.
+ ///
+ /// Given that the `String` is empty, this will not allocate any initial
+ /// buffer. While that means that this initial operation is very
+ /// inexpensive, it may cause excessive allocation later when you add
+ /// data. If you have an idea of how much data the `String` will hold,
+ /// consider the [`with_capacity`] method to prevent excessive
+ /// re-allocation.
+ ///
+ /// [`with_capacity`]: String::with_capacity
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::new();
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_string_new", since = "1.39.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub const fn new() -> String {
+ String { vec: Vec::new() }
+ }
+
+ /// Creates a new empty `String` with a particular capacity.
+ ///
+ /// `String`s have an internal buffer to hold their data. The capacity is
+ /// the length of that buffer, and can be queried with the [`capacity`]
+ /// method. This method creates an empty `String`, but one with an initial
+ /// buffer that can hold `capacity` bytes. This is useful when you may be
+ /// appending a bunch of data to the `String`, reducing the number of
+ /// reallocations it needs to do.
+ ///
+ /// [`capacity`]: String::capacity
+ ///
+ /// If the given capacity is `0`, no allocation will occur, and this method
+ /// is identical to the [`new`] method.
+ ///
+ /// [`new`]: String::new
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ ///
+ /// // The String contains no chars, even though it has capacity for more
+ /// assert_eq!(s.len(), 0);
+ ///
+ /// // These are all done without reallocating...
+ /// let cap = s.capacity();
+ /// for _ in 0..10 {
+ /// s.push('a');
+ /// }
+ ///
+ /// assert_eq!(s.capacity(), cap);
+ ///
+ /// // ...but this may make the string reallocate
+ /// s.push('a');
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> String {
+ String { vec: Vec::with_capacity(capacity) }
+ }
+
+ // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
+ // required for this method definition, is not available. Since we don't
+ // require this method for testing purposes, I'll just stub it
+ // NB see the slice::hack module in slice.rs for more information
+ #[inline]
+ #[cfg(test)]
+ pub fn from_str(_: &str) -> String {
+ panic!("not available with cfg(test)");
+ }
+
+ /// Converts a vector of bytes to a `String`.
+ ///
+ /// A string ([`String`]) is made of bytes ([`u8`]), and a vector of bytes
+ /// ([`Vec<u8>`]) is made of bytes, so this function converts between the
+ /// two. Not all byte slices are valid `String`s, however: `String`
+ /// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
+ /// the bytes are valid UTF-8, and then does the conversion.
+ ///
+ /// If you are sure that the byte slice is valid UTF-8, and you don't want
+ /// to incur the overhead of the validity check, there is an unsafe version
+ /// of this function, [`from_utf8_unchecked`], which has the same behavior
+ /// but skips the check.
+ ///
+ /// This method will take care to not copy the vector, for efficiency's
+ /// sake.
+ ///
+ /// If you need a [`&str`] instead of a `String`, consider
+ /// [`str::from_utf8`].
+ ///
+ /// The inverse of this method is [`into_bytes`].
+ ///
+ /// # Errors
+ ///
+ /// Returns [`Err`] if the slice is not UTF-8 with a description as to why the
+ /// provided bytes are not UTF-8. The vector you moved in is also included.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// // We know these bytes are valid, so we'll use `unwrap()`.
+ /// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ ///
+ /// Incorrect bytes:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let sparkle_heart = vec![0, 159, 146, 150];
+ ///
+ /// assert!(String::from_utf8(sparkle_heart).is_err());
+ /// ```
+ ///
+ /// See the docs for [`FromUtf8Error`] for more details on what you can do
+ /// with this error.
+ ///
+ /// [`from_utf8_unchecked`]: String::from_utf8_unchecked
+ /// [`Vec<u8>`]: crate::vec::Vec "Vec"
+ /// [`&str`]: prim@str "&str"
+ /// [`into_bytes`]: String::into_bytes
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf8(vec: Vec<u8>) -> Result<String, FromUtf8Error> {
+ match str::from_utf8(&vec) {
+ Ok(..) => Ok(String { vec }),
+ Err(e) => Err(FromUtf8Error { bytes: vec, error: e }),
+ }
+ }
+
+ /// Converts a slice of bytes to a string, including invalid characters.
+ ///
+ /// Strings are made of bytes ([`u8`]), and a slice of bytes
+ /// ([`&[u8]`][byteslice]) is made of bytes, so this function converts
+ /// between the two. Not all byte slices are valid strings, however: strings
+ /// are required to be valid UTF-8. During this conversion,
+ /// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with
+ /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], which looks like this: �
+ ///
+ /// [byteslice]: prim@slice
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
+ ///
+ /// If you are sure that the byte slice is valid UTF-8, and you don't want
+ /// to incur the overhead of the conversion, there is an unsafe version
+ /// of this function, [`from_utf8_unchecked`], which has the same behavior
+ /// but skips the checks.
+ ///
+ /// [`from_utf8_unchecked`]: String::from_utf8_unchecked
+ ///
+ /// This function returns a [`Cow<'a, str>`]. If our byte slice is invalid
+ /// UTF-8, then we need to insert the replacement characters, which will
+ /// change the size of the string, and hence, require a `String`. But if
+ /// it's already valid UTF-8, we don't need a new allocation. This return
+ /// type allows us to handle both cases.
+ ///
+ /// [`Cow<'a, str>`]: crate::borrow::Cow "borrow::Cow"
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// let sparkle_heart = String::from_utf8_lossy(&sparkle_heart);
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ ///
+ /// Incorrect bytes:
+ ///
+ /// ```
+ /// // some invalid bytes
+ /// let input = b"Hello \xF0\x90\x80World";
+ /// let output = String::from_utf8_lossy(input);
+ ///
+ /// assert_eq!("Hello �World", output);
+ /// ```
+ #[must_use]
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
+ let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
+
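+ // If the first (and only) chunk is entirely valid, the whole input is valid
+ // UTF-8 and can be borrowed without allocating.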
+ let first_valid = if let Some(chunk) = iter.next() {
+ let lossy::Utf8LossyChunk { valid, broken } = chunk;
+ if broken.is_empty() {
+ debug_assert_eq!(valid.len(), v.len());
+ return Cow::Borrowed(valid);
+ }
+ valid
+ } else {
+ return Cow::Borrowed("");
+ };
+
+ const REPLACEMENT: &str = "\u{FFFD}";
+
+ let mut res = String::with_capacity(v.len());
+ res.push_str(first_valid);
+ res.push_str(REPLACEMENT);
+
+ for lossy::Utf8LossyChunk { valid, broken } in iter {
+ res.push_str(valid);
+ if !broken.is_empty() {
+ res.push_str(REPLACEMENT);
+ }
+ }
+
+ Cow::Owned(res)
+ }
+
+ /// Decodes a UTF-16–encoded slice `v` into a `String`, returning [`Err`]
+ /// if `v` contains any invalid data.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // 𝄞music
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0x0073, 0x0069, 0x0063];
+ /// assert_eq!(String::from("𝄞music"),
+ /// String::from_utf16(v).unwrap());
+ ///
+ /// // 𝄞mu<invalid>ic
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0xD800, 0x0069, 0x0063];
+ /// assert!(String::from_utf16(v).is_err());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf16(v: &[u16]) -> Result<String, FromUtf16Error> {
+ // This isn't done via collect::<Result<_, _>>() for performance reasons.
+ // FIXME: the function can be simplified again when #48994 is closed.
+ let mut ret = String::with_capacity(v.len());
+ for c in decode_utf16(v.iter().cloned()) {
+ if let Ok(c) = c {
+ ret.push(c);
+ } else {
+ return Err(FromUtf16Error(()));
+ }
+ }
+ Ok(ret)
+ }
+
+ /// Decodes a UTF-16–encoded slice `v` into a `String`, replacing
+ /// invalid data with [the replacement character (`U+FFFD`)][U+FFFD].
+ ///
+ /// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`],
+ /// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8
+ /// conversion requires a memory allocation.
+ ///
+ /// [`from_utf8_lossy`]: String::from_utf8_lossy
+ /// [`Cow<'a, str>`]: crate::borrow::Cow "borrow::Cow"
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // 𝄞mus<invalid>ic<invalid>
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0x0073, 0xDD1E, 0x0069, 0x0063,
+ /// 0xD834];
+ ///
+ /// assert_eq!(String::from("𝄞mus\u{FFFD}ic\u{FFFD}"),
+ /// String::from_utf16_lossy(v));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf16_lossy(v: &[u16]) -> String {
+ decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect()
+ }
+
+ /// Decomposes a `String` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of
+ /// the string (in bytes), and the allocated capacity of the data
+ /// (in bytes). These are the same arguments in the same order as
+ /// the arguments to [`from_raw_parts`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `String`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `String` with the [`from_raw_parts`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts`]: String::from_raw_parts
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_into_raw_parts)]
+ /// let s = String::from("hello");
+ ///
+ /// let (ptr, len, cap) = s.into_raw_parts();
+ ///
+ /// let rebuilt = unsafe { String::from_raw_parts(ptr, len, cap) };
+ /// assert_eq!(rebuilt, "hello");
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts(self) -> (*mut u8, usize, usize) {
+ self.vec.into_raw_parts()
+ }
+
+ /// Creates a new `String` from a length, capacity, and pointer.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * The memory at `buf` needs to have been previously allocated by the
+ /// same allocator the standard library uses, with a required alignment of exactly 1.
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * `capacity` needs to be the correct value.
+ /// * The first `length` bytes at `buf` need to be valid UTF-8.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example, it is normally **not** safe to
+ /// build a `String` from a pointer to a C `char` array containing UTF-8
+ /// _unless_ you are certain that array was originally allocated by the
+ /// Rust standard library's allocator.
+ ///
+ /// The ownership of `buf` is effectively transferred to the
+ /// `String` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::mem;
+ ///
+ /// unsafe {
+ /// let s = String::from("hello");
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent automatically dropping the String's data
+ /// let mut s = mem::ManuallyDrop::new(s);
+ ///
+ /// let ptr = s.as_mut_ptr();
+ /// let len = s.len();
+ /// let capacity = s.capacity();
+ ///
+ /// let s = String::from_raw_parts(ptr, len, capacity);
+ ///
+ /// assert_eq!(String::from("hello"), s);
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> String {
+ unsafe { String { vec: Vec::from_raw_parts(buf, length, capacity) } }
+ }
+
+ /// Converts a vector of bytes to a `String` without checking that the
+ /// string contains valid UTF-8.
+ ///
+ /// See the safe version, [`from_utf8`], for more details.
+ ///
+ /// [`from_utf8`]: String::from_utf8
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because it does not check that the bytes passed
+ /// to it are valid UTF-8. If this constraint is violated, it may cause
+ /// memory unsafety issues with future users of the `String`, as the rest of
+ /// the standard library assumes that `String`s are valid UTF-8.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// let sparkle_heart = unsafe {
+ /// String::from_utf8_unchecked(sparkle_heart)
+ /// };
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> String {
+ String { vec: bytes }
+ }
+
+ /// Converts a `String` into a byte vector.
+ ///
+ /// This consumes the `String`, so we do not need to copy its contents.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ /// let bytes = s.into_bytes();
+ ///
+ /// assert_eq!(&[104, 101, 108, 108, 111][..], &bytes[..]);
+ /// ```
+ #[inline]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.vec
+ }
+
+ /// Extracts a string slice containing the entire `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("foo");
+ ///
+ /// assert_eq!("foo", s.as_str());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "string_as_str", since = "1.7.0")]
+ pub fn as_str(&self) -> &str {
+ self
+ }
+
+ /// Converts a `String` into a mutable string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foobar");
+ /// let s_mut_str = s.as_mut_str();
+ ///
+ /// s_mut_str.make_ascii_uppercase();
+ ///
+ /// assert_eq!("FOOBAR", s_mut_str);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "string_as_str", since = "1.7.0")]
+ pub fn as_mut_str(&mut self) -> &mut str {
+ self
+ }
+
+ /// Appends a given string slice onto the end of this `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.push_str("bar");
+ ///
+ /// assert_eq!("foobar", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_str(&mut self, string: &str) {
+ self.vec.extend_from_slice(string.as_bytes())
+ }
+
+ /// Copies elements from `src` range to the end of the string.
+ ///
+ /// ## Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(string_extend_from_within)]
+ /// let mut string = String::from("abcde");
+ ///
+ /// string.extend_from_within(2..);
+ /// assert_eq!(string, "abcdecde");
+ ///
+ /// string.extend_from_within(..2);
+ /// assert_eq!(string, "abcdecdeab");
+ ///
+ /// string.extend_from_within(4..8);
+ /// assert_eq!(string, "abcdecdeabecde");
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "string_extend_from_within", issue = "none")]
+ pub fn extend_from_within<R>(&mut self, src: R)
+ where
+ R: RangeBounds<usize>,
+ {
+ let src @ Range { start, end } = slice::range(src, ..self.len());
+
+ assert!(self.is_char_boundary(start));
+ assert!(self.is_char_boundary(end));
+
+ self.vec.extend_from_within(src);
+ }
+
+ /// Returns this `String`'s capacity, in bytes.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::with_capacity(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.vec.capacity()
+ }
+
+ /// Ensures that this `String`'s capacity is at least `additional` bytes
+ /// larger than its length.
+ ///
+ /// The capacity may be increased by more than `additional` bytes if it
+ /// chooses, to prevent frequent reallocations.
+ ///
+ /// If you do not want this "at least" behavior, see the [`reserve_exact`]
+ /// method.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// [`reserve_exact`]: String::reserve_exact
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::new();
+ ///
+ /// s.reserve(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ ///
+ /// This might not actually increase the capacity:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ /// s.push('a');
+ /// s.push('b');
+ ///
+ /// // s now has a length of 2 and a capacity of 10
+ /// assert_eq!(2, s.len());
+ /// assert_eq!(10, s.capacity());
+ ///
+ /// // Since we already have an extra 8 capacity, calling this...
+ /// s.reserve(8);
+ ///
+ /// // ... doesn't actually increase.
+ /// assert_eq!(10, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.vec.reserve(additional)
+ }
+
+ /// Ensures that this `String`'s capacity is `additional` bytes
+ /// larger than its length.
+ ///
+ /// Consider using the [`reserve`] method unless you absolutely know
+ /// better than the allocator.
+ ///
+ /// [`reserve`]: String::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::new();
+ ///
+ /// s.reserve_exact(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ ///
+ /// This might not actually increase the capacity:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ /// s.push('a');
+ /// s.push('b');
+ ///
+ /// // s now has a length of 2 and a capacity of 10
+ /// assert_eq!(2, s.len());
+ /// assert_eq!(10, s.capacity());
+ ///
+ /// // Since we already have an extra 8 capacity, calling this...
+ /// s.reserve_exact(8);
+ ///
+ /// // ... doesn't actually increase.
+ /// assert_eq!(10, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.vec.reserve_exact(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given `String`. The collection may reserve more space to avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &str) -> Result<String, TryReserveError> {
+ /// let mut output = String::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.push_str(data);
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.vec.try_reserve(additional)
+ }
+
+ /// Tries to reserve the minimum capacity for exactly `additional` more elements to
+ /// be inserted in the given `String`. After calling `try_reserve_exact`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: String::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &str) -> Result<String, TryReserveError> {
+ /// let mut output = String::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.push_str(data);
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.vec.try_reserve_exact(additional)
+ }
+
+ /// Shrinks the capacity of this `String` to match its length.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to_fit();
+ /// assert_eq!(3, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.vec.shrink_to_fit()
+ }
+
+ /// Shrinks the capacity of this `String` with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to(10);
+ /// assert!(s.capacity() >= 10);
+ /// s.shrink_to(0);
+ /// assert!(s.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.vec.shrink_to(min_capacity)
+ }
+
+ /// Appends the given [`char`] to the end of this `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("abc");
+ ///
+ /// s.push('1');
+ /// s.push('2');
+ /// s.push('3');
+ ///
+ /// assert_eq!("abc123", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, ch: char) {
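+ // A one-byte char is plain ASCII and can be pushed as a single byte;
+ // anything longer is encoded into a small stack buffer first.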
+ match ch.len_utf8() {
+ 1 => self.vec.push(ch as u8),
+ _ => self.vec.extend_from_slice(ch.encode_utf8(&mut [0; 4]).as_bytes()),
+ }
+ }
+
+ /// Returns a byte slice of this `String`'s contents.
+ ///
+ /// The inverse of this method is [`from_utf8`].
+ ///
+ /// [`from_utf8`]: String::from_utf8
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ ///
+ /// assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.vec
+ }
+
+ /// Shortens this `String` to the specified length.
+ ///
+ /// If `new_len` is greater than the string's current length, this has no
+ /// effect.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the string.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `new_len` does not lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("hello");
+ ///
+ /// s.truncate(2);
+ ///
+ /// assert_eq!("he", s);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn truncate(&mut self, new_len: usize) {
+ if new_len <= self.len() {
+ assert!(self.is_char_boundary(new_len));
+ self.vec.truncate(new_len)
+ }
+ }
+
+ /// Removes the last character from the string buffer and returns it.
+ ///
+ /// Returns [`None`] if this `String` is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// assert_eq!(s.pop(), Some('o'));
+ /// assert_eq!(s.pop(), Some('o'));
+ /// assert_eq!(s.pop(), Some('f'));
+ ///
+ /// assert_eq!(s.pop(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<char> {
+ let ch = self.chars().rev().next()?;
+ let newlen = self.len() - ch.len_utf8();
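+ // Shrink the length so the last char's bytes are cut off; the remaining
+ // prefix is still valid UTF-8.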
+ unsafe {
+ self.vec.set_len(newlen);
+ }
+ Some(ch)
+ }
+
+ /// Removes a [`char`] from this `String` at a byte position and returns it.
+ ///
+ /// This is an *O*(*n*) operation, as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than or equal to the `String`'s length,
+ /// or if it does not lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// assert_eq!(s.remove(0), 'f');
+ /// assert_eq!(s.remove(1), 'o');
+ /// assert_eq!(s.remove(0), 'o');
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(&mut self, idx: usize) -> char {
+ let ch = match self[idx..].chars().next() {
+ Some(ch) => ch,
+ None => panic!("cannot remove a char from the end of a string"),
+ };
+
+ let next = idx + ch.len_utf8();
+ let len = self.len();
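+ // Shift the bytes after the removed char to the left, then shrink the
+ // length by the char's width.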
+ unsafe {
+ ptr::copy(self.vec.as_ptr().add(next), self.vec.as_mut_ptr().add(idx), len - next);
+ self.vec.set_len(len - (next - idx));
+ }
+ ch
+ }
+
+ /// Remove all matches of pattern `pat` in the `String`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(string_remove_matches)]
+ /// let mut s = String::from("Trees are not green, the sky is not blue.");
+ /// s.remove_matches("not ");
+ /// assert_eq!("Trees are green, the sky is blue.", s);
+ /// ```
+ ///
+ /// Matches will be detected and removed iteratively, so in cases where
+ /// patterns overlap, only the first pattern will be removed:
+ ///
+ /// ```
+ /// #![feature(string_remove_matches)]
+ /// let mut s = String::from("banana");
+ /// s.remove_matches("ana");
+ /// assert_eq!("bna", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "string_remove_matches", reason = "new API", issue = "72826")]
+ pub fn remove_matches<'a, P>(&'a mut self, pat: P)
+ where
+ P: for<'x> Pattern<'x>,
+ {
+ use core::str::pattern::Searcher;
+
+ let rejections = {
+ let mut searcher = pat.into_searcher(self);
+ // Per Searcher::next:
+ //
+ // A Match result needs to contain the whole matched pattern,
+ // however Reject results may be split up into arbitrary many
+ // adjacent fragments. Both ranges may have zero length.
+ //
+ // In practice the implementation of Searcher::next_match tends to
+ // be more efficient, so we use it here and do some work to invert
+ // matches into rejections since that's what we want to copy below.
+ let mut front = 0;
+ let rejections: Vec<_> = from_fn(|| {
+ let (start, end) = searcher.next_match()?;
+ let prev_front = front;
+ front = end;
+ Some((prev_front, start))
+ })
+ .collect();
+ rejections.into_iter().chain(core::iter::once((front, self.len())))
+ };
+
+ let mut len = 0;
+ let ptr = self.vec.as_mut_ptr();
+
+ for (start, end) in rejections {
+ let count = end - start;
+ if start != len {
+ // SAFETY: per Searcher::next:
+ //
+ // The stream of Match and Reject values up to a Done will
+ // contain index ranges that are adjacent, non-overlapping,
+ // covering the whole haystack, and laying on utf8
+ // boundaries.
+ unsafe {
+ ptr::copy(ptr.add(start), ptr.add(len), count);
+ }
+ }
+ len += count;
+ }
+
+ unsafe {
+ self.vec.set_len(len);
+ }
+ }
+
+ /// Retains only the characters specified by the predicate.
+ ///
+ /// In other words, remove all characters `c` such that `f(c)` returns `false`.
+ /// This method operates in place, visiting each character exactly once in the
+ /// original order, and preserves the order of the retained characters.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("f_o_ob_ar");
+ ///
+ /// s.retain(|c| c != '_');
+ ///
+ /// assert_eq!(s, "foobar");
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// let mut s = String::from("abcde");
+ /// let keep = [false, true, true, false, true];
+ /// let mut iter = keep.iter();
+ /// s.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(s, "bce");
+ /// ```
+ #[inline]
+ #[stable(feature = "string_retain", since = "1.26.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(char) -> bool,
+ {
+ struct SetLenOnDrop<'a> {
+ s: &'a mut String,
+ idx: usize,
+ del_bytes: usize,
+ }
+
+ impl<'a> Drop for SetLenOnDrop<'a> {
+ fn drop(&mut self) {
+ let new_len = self.idx - self.del_bytes;
+ debug_assert!(new_len <= self.s.len());
+ unsafe { self.s.vec.set_len(new_len) };
+ }
+ }
+
+ let len = self.len();
+ let mut guard = SetLenOnDrop { s: self, idx: 0, del_bytes: 0 };
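+ // The guard writes back a consistent length on drop, so the string stays
+ // valid UTF-8 even if `f` panics part-way through.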
+
+ while guard.idx < len {
+ let ch = unsafe { guard.s.get_unchecked(guard.idx..len).chars().next().unwrap() };
+ let ch_len = ch.len_utf8();
+
+ if !f(ch) {
+ guard.del_bytes += ch_len;
+ } else if guard.del_bytes > 0 {
+ unsafe {
+ ptr::copy(
+ guard.s.vec.as_ptr().add(guard.idx),
+ guard.s.vec.as_mut_ptr().add(guard.idx - guard.del_bytes),
+ ch_len,
+ );
+ }
+ }
+
+ // Point idx to the next char
+ guard.idx += ch_len;
+ }
+
+ drop(guard);
+ }
+
+ /// Inserts a character into this `String` at a byte position.
+ ///
+ /// This is an *O*(*n*) operation as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than the `String`'s length, or if it does not
+ /// lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(3);
+ ///
+ /// s.insert(0, 'f');
+ /// s.insert(1, 'o');
+ /// s.insert(2, 'o');
+ ///
+ /// assert_eq!("foo", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, idx: usize, ch: char) {
+ assert!(self.is_char_boundary(idx));
+ let mut bits = [0; 4];
+ let bits = ch.encode_utf8(&mut bits).as_bytes();
+
+ unsafe {
+ self.insert_bytes(idx, bits);
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn insert_bytes(&mut self, idx: usize, bytes: &[u8]) {
+ let len = self.len();
+ let amt = bytes.len();
+ self.vec.reserve(amt);
+
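+ // Shift the tail right by `amt` bytes to open a gap at `idx`, copy the new
+ // bytes into the gap, then update the length.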
+ unsafe {
+ ptr::copy(self.vec.as_ptr().add(idx), self.vec.as_mut_ptr().add(idx + amt), len - idx);
+ ptr::copy_nonoverlapping(bytes.as_ptr(), self.vec.as_mut_ptr().add(idx), amt);
+ self.vec.set_len(len + amt);
+ }
+ }
+
+ /// Inserts a string slice into this `String` at a byte position.
+ ///
+ /// This is an *O*(*n*) operation as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than the `String`'s length, or if it does not
+ /// lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("bar");
+ ///
+ /// s.insert_str(0, "foo");
+ ///
+ /// assert_eq!("foobar", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "insert_str", since = "1.16.0")]
+ pub fn insert_str(&mut self, idx: usize, string: &str) {
+ assert!(self.is_char_boundary(idx));
+
+ unsafe {
+ self.insert_bytes(idx, string.as_bytes());
+ }
+ }
+
+ /// Returns a mutable reference to the contents of this `String`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because the returned `&mut Vec` allows writing
+ /// bytes which are not valid UTF-8. If this constraint is violated, using
+ /// the original `String` after dropping the `&mut Vec` may violate memory
+ /// safety, as the rest of the standard library assumes that `String`s are
+ /// valid UTF-8.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("hello");
+ ///
+ /// unsafe {
+ /// let vec = s.as_mut_vec();
+ /// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]);
+ ///
+ /// vec.reverse();
+ /// }
+ /// assert_eq!(s, "olleh");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8> {
+ &mut self.vec
+ }
+
+ /// Returns the length of this `String`, in bytes, not [`char`]s or
+ /// graphemes. In other words, it might not be what a human considers the
+ /// length of the string.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = String::from("foo");
+ /// assert_eq!(a.len(), 3);
+ ///
+ /// let fancy_f = String::from("ƒoo");
+ /// assert_eq!(fancy_f.len(), 4);
+ /// assert_eq!(fancy_f.chars().count(), 3);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.vec.len()
+ }
+
+ /// Returns `true` if this `String` has a length of zero, and `false` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut v = String::new();
+ /// assert!(v.is_empty());
+ ///
+ /// v.push('a');
+ /// assert!(!v.is_empty());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Splits the string into two at the given byte index.
+ ///
+ /// Returns a newly allocated `String`. `self` contains bytes `[0, at)`, and
+ /// the returned `String` contains bytes `[at, len)`. `at` must be on the
+ /// boundary of a UTF-8 code point.
+ ///
+ /// Note that the capacity of `self` does not change.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at` is not on a `UTF-8` code point boundary, or if it is beyond the last
+ /// code point of the string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn main() {
+ /// let mut hello = String::from("Hello, World!");
+ /// let world = hello.split_off(7);
+ /// assert_eq!(hello, "Hello, ");
+ /// assert_eq!(world, "World!");
+ /// # }
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "string_split_off", since = "1.16.0")]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ pub fn split_off(&mut self, at: usize) -> String {
+ assert!(self.is_char_boundary(at));
+ let other = self.vec.split_off(at);
+ unsafe { String::from_utf8_unchecked(other) }
+ }
+
+ /// Truncates this `String`, removing all contents.
+ ///
+ /// While this means the `String` will have a length of zero, it does not
+ /// touch its capacity.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.clear();
+ ///
+ /// assert!(s.is_empty());
+ /// assert_eq!(0, s.len());
+ /// assert_eq!(3, s.capacity());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.vec.clear()
+ }
+
+ /// Removes the specified range from the string in bulk, returning all
+ /// removed characters as an iterator.
+ ///
+ /// The returned iterator keeps a mutable borrow on the string to optimize
+ /// its implementation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// # Leaking
+ ///
+ /// If the returned iterator goes out of scope without being dropped (due to
+ /// [`core::mem::forget`], for example), the string may still contain a copy
+ /// of any drained characters, or may have lost characters arbitrarily,
+ /// including characters outside the range.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("α is alpha, β is beta");
+ /// let beta_offset = s.find('β').unwrap_or(s.len());
+ ///
+ /// // Remove the range up until the β from the string
+ /// let t: String = s.drain(..beta_offset).collect();
+ /// assert_eq!(t, "α is alpha, ");
+ /// assert_eq!(s, "β is beta");
+ ///
+ /// // A full range clears the string, like `clear()` does
+ /// s.drain(..);
+ /// assert_eq!(s, "");
+ /// ```
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // The String version of Drain does not have the memory safety issues
+ // of the vector version. The data is just plain bytes.
+ // Because the range removal happens in Drop, if the Drain iterator is leaked,
+ // the removal will not happen.
+ let Range { start, end } = slice::range(range, ..self.len());
+ assert!(self.is_char_boundary(start));
+ assert!(self.is_char_boundary(end));
+
+ // Take out two simultaneous borrows. The &mut String won't be accessed
+ // until iteration is over, in Drop.
+ let self_ptr = self as *mut _;
+ // SAFETY: `slice::range` and `is_char_boundary` do the appropriate bounds checks.
+ let chars_iter = unsafe { self.get_unchecked(start..end) }.chars();
+
+ Drain { start, end, iter: chars_iter, string: self_ptr }
+ }
+
+ /// Removes the specified range in the string,
+ /// and replaces it with the given string.
+ /// The given string doesn't need to be the same length as the range.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("α is alpha, β is beta");
+ /// let beta_offset = s.find('β').unwrap_or(s.len());
+ ///
+ /// // Replace the range up until the β from the string
+ /// s.replace_range(..beta_offset, "Α is capital alpha; ");
+ /// assert_eq!(s, "Α is capital alpha; β is beta");
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "splice", since = "1.27.0")]
+ pub fn replace_range<R>(&mut self, range: R, replace_with: &str)
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // Replace_range does not have the memory safety issues of a vector Splice.
+ // The data is just plain bytes.
+
+ // WARNING: Inlining this variable would be unsound (#81138)
+ let start = range.start_bound();
+ match start {
+ Included(&n) => assert!(self.is_char_boundary(n)),
+ Excluded(&n) => assert!(self.is_char_boundary(n + 1)),
+ Unbounded => {}
+ };
+ // WARNING: Inlining this variable would be unsound (#81138)
+ let end = range.end_bound();
+ match end {
+ Included(&n) => assert!(self.is_char_boundary(n + 1)),
+ Excluded(&n) => assert!(self.is_char_boundary(n)),
+ Unbounded => {}
+ };
+
+ // Using `range` again would be unsound (#81138)
+ // We assume the bounds reported by `range` remain the same, but
+ // an adversarial implementation could change them between calls
+ unsafe { self.as_mut_vec() }.splice((start, end), replace_with.bytes());
+ }
+
+ /// Converts this `String` into a <code>[Box]<[str]></code>.
+ ///
+ /// This will drop any excess capacity.
+ ///
+ /// [str]: prim@str "str"
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ ///
+ /// let b = s.into_boxed_str();
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "box_str", since = "1.4.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_boxed_str(self) -> Box<str> {
+ let slice = self.vec.into_boxed_slice();
+ unsafe { from_boxed_utf8_unchecked(slice) }
+ }
+}
+
+impl FromUtf8Error {
+ /// Returns a slice of the [`u8`] bytes that were attempted to be converted into a `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let value = String::from_utf8(bytes);
+ ///
+ /// assert_eq!(&[0, 159], value.unwrap_err().as_bytes());
+ /// ```
+ #[must_use]
+ #[stable(feature = "from_utf8_error_as_bytes", since = "1.26.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.bytes[..]
+ }
+
+ /// Returns the bytes that were attempted to convert to a `String`.
+ ///
+ /// This method is carefully constructed to avoid allocation. It will
+ /// consume the error, moving out the bytes, so that a copy of the bytes
+ /// does not need to be made.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let value = String::from_utf8(bytes);
+ ///
+ /// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.bytes
+ }
+
+ /// Fetch a `Utf8Error` to get more details about the conversion failure.
+ ///
+ /// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
+ /// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
+ /// an analogue to `FromUtf8Error`. See its documentation for more details
+ /// on using it.
+ ///
+ /// [`std::str`]: core::str "std::str"
+ /// [`&str`]: prim@str "&str"
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let error = String::from_utf8(bytes).unwrap_err().utf8_error();
+ ///
+ /// // the first byte is invalid here
+ /// assert_eq!(1, error.valid_up_to());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn utf8_error(&self) -> Utf8Error {
+ self.error
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for FromUtf8Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.error, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for FromUtf16Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt("invalid utf-16: lone surrogate found", f)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Clone for String {
+ fn clone(&self) -> Self {
+ String { vec: self.vec.clone() }
+ }
+
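+ // Delegating to `Vec::clone_from` lets `self` reuse its existing buffer
+ // instead of allocating a new one when it is already large enough.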
+ fn clone_from(&mut self, source: &Self) {
+ self.vec.clone_from(&source.vec);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromIterator<char> for String {
+ fn from_iter<I: IntoIterator<Item = char>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_from_iter_by_ref", since = "1.17.0")]
+impl<'a> FromIterator<&'a char> for String {
+ fn from_iter<I: IntoIterator<Item = &'a char>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> FromIterator<&'a str> for String {
+ fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_string", since = "1.4.0")]
+impl FromIterator<String> for String {
+ fn from_iter<I: IntoIterator<Item = String>>(iter: I) -> String {
+ let mut iterator = iter.into_iter();
+
+ // Because we're iterating over `String`s, we can avoid at least
+ // one allocation by getting the first string from the iterator
+ // and appending to it all the subsequent strings.
+ match iterator.next() {
+ None => String::new(),
+ Some(mut buf) => {
+ buf.extend(iterator);
+ buf
+ }
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_str2", since = "1.45.0")]
+impl FromIterator<Box<str>> for String {
+ fn from_iter<I: IntoIterator<Item = Box<str>>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "herd_cows", since = "1.19.0")]
+impl<'a> FromIterator<Cow<'a, str>> for String {
+ fn from_iter<I: IntoIterator<Item = Cow<'a, str>>>(iter: I) -> String {
+ let mut iterator = iter.into_iter();
+
+ // Because we're iterating over CoWs, we can (potentially) avoid at least
+ // one allocation by getting the first item and appending to it all the
+ // subsequent items.
+ match iterator.next() {
+ None => String::new(),
+ Some(cow) => {
+ let mut buf = cow.into_owned();
+ buf.extend(iterator);
+ buf
+ }
+ }
+ }
+}
+
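+/// Extending a `String` with an iterator of [`char`]s appends each character in
+/// turn; a brief illustration:
+///
+/// ```
+/// let mut s = String::from("abc");
+/// s.extend("def".chars());
+/// assert_eq!(s, "abcdef");
+/// ```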
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Extend<char> for String {
+ fn extend<I: IntoIterator<Item = char>>(&mut self, iter: I) {
+ let iterator = iter.into_iter();
+ let (lower_bound, _) = iterator.size_hint();
+ self.reserve(lower_bound);
+ iterator.for_each(move |c| self.push(c));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, c: char) {
+ self.push(c);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a> Extend<&'a char> for String {
+ fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &c: &'a char) {
+ self.push(c);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Extend<&'a str> for String {
+ fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: &'a str) {
+ self.push_str(s);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_str2", since = "1.45.0")]
+impl Extend<Box<str>> for String {
+ fn extend<I: IntoIterator<Item = Box<str>>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_string", since = "1.4.0")]
+impl Extend<String> for String {
+ fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: String) {
+ self.push_str(&s);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "herd_cows", since = "1.19.0")]
+impl<'a> Extend<Cow<'a, str>> for String {
+ fn extend<I: IntoIterator<Item = Cow<'a, str>>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: Cow<'a, str>) {
+ self.push_str(&s);
+ }
+}
+
+/// A convenience impl that delegates to the impl for `&str`.
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!(String::from("Hello world").find("world"), Some(6));
+/// ```
+#[unstable(
+ feature = "pattern",
+ reason = "API not fully fleshed out and ready to be stabilized",
+ issue = "27721"
+)]
+impl<'a, 'b> Pattern<'a> for &'b String {
+ type Searcher = <&'b str as Pattern<'a>>::Searcher;
+
+ fn into_searcher(self, haystack: &'a str) -> <&'b str as Pattern<'a>>::Searcher {
+ self[..].into_searcher(haystack)
+ }
+
+ #[inline]
+ fn is_contained_in(self, haystack: &'a str) -> bool {
+ self[..].is_contained_in(haystack)
+ }
+
+ #[inline]
+ fn is_prefix_of(self, haystack: &'a str) -> bool {
+ self[..].is_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+ self[..].strip_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn is_suffix_of(self, haystack: &'a str) -> bool {
+ self[..].is_suffix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
+ self[..].strip_suffix_of(haystack)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq for String {
+ #[inline]
+ fn eq(&self, other: &String) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &String) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+}
+
+macro_rules! impl_eq {
+ ($lhs:ty, $rhs: ty) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(unused_lifetimes)]
+ impl<'a, 'b> PartialEq<$rhs> for $lhs {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &$rhs) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(unused_lifetimes)]
+ impl<'a, 'b> PartialEq<$lhs> for $rhs {
+ #[inline]
+ fn eq(&self, other: &$lhs) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &$lhs) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+ }
+ };
+}
+
+impl_eq! { String, str }
+impl_eq! { String, &'a str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, &'b str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, String }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl const Default for String {
+ /// Creates an empty `String`.
+ #[inline]
+ fn default() -> String {
+ String::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for String {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for String {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for String {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
+ (**self).hash(hasher)
+ }
+}
+
+/// Implements the `+` operator for concatenating two strings.
+///
+/// This consumes the `String` on the left-hand side and re-uses its buffer (growing it if
+/// necessary). This is done to avoid allocating a new `String` and copying the entire contents on
+/// every operation, which would lead to *O*(*n*^2) running time when building an *n*-byte string by
+/// repeated concatenation.
+///
+/// The string on the right-hand side is only borrowed; its contents are copied into the returned
+/// `String`.
+///
+/// # Examples
+///
+/// Concatenating two `String`s takes the first by value and borrows the second:
+///
+/// ```
+/// let a = String::from("hello");
+/// let b = String::from(" world");
+/// let c = a + &b;
+/// // `a` is moved and can no longer be used here.
+/// ```
+///
+/// If you want to keep using the first `String`, you can clone it and append to the clone instead:
+///
+/// ```
+/// let a = String::from("hello");
+/// let b = String::from(" world");
+/// let c = a.clone() + &b;
+/// // `a` is still valid here.
+/// ```
+///
+/// Concatenating `&str` slices can be done by converting the first to a `String`:
+///
+/// ```
+/// let a = "hello";
+/// let b = " world";
+/// let c = a.to_string() + b;
+/// ```
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Add<&str> for String {
+ type Output = String;
+
+ #[inline]
+ fn add(mut self, other: &str) -> String {
+ self.push_str(other);
+ self
+ }
+}
+
+/// Implements the `+=` operator for appending to a `String`.
+///
+/// This has the same behavior as the [`push_str`][String::push_str] method.
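+///
+/// # Examples
+///
+/// A short illustration; `+=` appends the right-hand `&str` in place:
+///
+/// ```
+/// let mut s = String::from("Hello");
+/// s += ", world!";
+/// assert_eq!(s, "Hello, world!");
+/// ```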
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "stringaddassign", since = "1.12.0")]
+impl AddAssign<&str> for String {
+ #[inline]
+ fn add_assign(&mut self, other: &str) {
+ self.push_str(other);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::Range<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::Range<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeTo<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeTo<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeFrom<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeFrom<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeFull> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, _index: ops::RangeFull) -> &str {
+ unsafe { str::from_utf8_unchecked(&self.vec) }
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::Index<ops::RangeInclusive<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
+ Index::index(&**self, index)
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::Index<ops::RangeToInclusive<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
+ Index::index(&**self, index)
+ }
+}
+
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::Range<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::Range<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeTo<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeFrom<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeFull> for String {
+ #[inline]
+ fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str {
+ unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::IndexMut<ops::RangeInclusive<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::IndexMut<ops::RangeToInclusive<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+
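+/// `String` dereferences to `str`, so a `&String` can be used wherever a `&str`
+/// is expected; a brief illustration:
+///
+/// ```
+/// fn takes_str(s: &str) -> usize {
+///     s.len()
+/// }
+///
+/// let s = String::from("hello");
+/// assert_eq!(takes_str(&s), 5);
+/// ```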
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Deref for String {
+ type Target = str;
+
+ #[inline]
+ fn deref(&self) -> &str {
+ unsafe { str::from_utf8_unchecked(&self.vec) }
+ }
+}
+
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::DerefMut for String {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut str {
+ unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
+ }
+}
+
+/// A type alias for [`Infallible`].
+///
+/// This alias exists for backwards compatibility, and may eventually be deprecated.
+///
+/// [`Infallible`]: core::convert::Infallible "convert::Infallible"
+#[stable(feature = "str_parse_error", since = "1.5.0")]
+pub type ParseError = core::convert::Infallible;
+
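+/// Parsing a string slice into a `String` cannot fail, since the error type is
+/// `Infallible`; a brief illustration:
+///
+/// ```
+/// let s: String = "hello".parse().unwrap();
+/// assert_eq!(s, "hello");
+/// ```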
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for String {
+ type Err = core::convert::Infallible;
+ #[inline]
+ fn from_str(s: &str) -> Result<String, Self::Err> {
+ Ok(String::from(s))
+ }
+}
+
+/// A trait for converting a value to a `String`.
+///
+/// This trait is automatically implemented for any type which implements the
+/// [`Display`] trait. As such, `ToString` shouldn't be implemented directly:
+/// [`Display`] should be implemented instead, and you get the `ToString`
+/// implementation for free.
+///
+/// [`Display`]: fmt::Display
+#[cfg_attr(not(test), rustc_diagnostic_item = "ToString")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ToString {
+ /// Converts the given value to a `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let i = 5;
+ /// let five = String::from("5");
+ ///
+ /// assert_eq!(five, i.to_string());
+ /// ```
+ #[rustc_conversion_suggestion]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn to_string(&self) -> String;
+}
+
+/// # Panics
+///
+/// In this implementation, the `to_string` method panics
+/// if the `Display` implementation returns an error.
+/// This indicates an incorrect `Display` implementation
+/// since `fmt::Write for String` never returns an error itself.
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Display + ?Sized> ToString for T {
+ // A common guideline is to not inline generic functions. However,
+ // removing `#[inline]` from this method causes non-negligible regressions.
+ // See <https://github.com/rust-lang/rust/pull/74852>, the last attempt
+ // to try to remove it.
+ #[inline]
+ default fn to_string(&self) -> String {
+ let mut buf = String::new();
+ let mut formatter = core::fmt::Formatter::new(&mut buf);
+ // Bypass format_args!() to avoid write_str with zero-length strs
+ fmt::Display::fmt(self, &mut formatter)
+ .expect("a Display implementation returned an error unexpectedly");
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "char_to_string_specialization", since = "1.46.0")]
+impl ToString for char {
+ #[inline]
+ fn to_string(&self) -> String {
+ String::from(self.encode_utf8(&mut [0; 4]))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "u8_to_string_specialization", since = "1.54.0")]
+impl ToString for u8 {
+ #[inline]
+ fn to_string(&self) -> String {
+ let mut buf = String::with_capacity(3);
+ let mut n = *self;
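+ // A `u8` has at most three decimal digits (0..=255): emit the hundreds and
+ // tens digits only when they are present, then always emit the ones digit.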
+ if n >= 10 {
+ if n >= 100 {
+ buf.push((b'0' + n / 100) as char);
+ n %= 100;
+ }
+ buf.push((b'0' + n / 10) as char);
+ n %= 10;
+ }
+ buf.push((b'0' + n) as char);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "i8_to_string_specialization", since = "1.54.0")]
+impl ToString for i8 {
+ #[inline]
+ fn to_string(&self) -> String {
+ let mut buf = String::with_capacity(4);
+ if self.is_negative() {
+ buf.push('-');
+ }
+ let mut n = self.unsigned_abs();
+ if n >= 10 {
+ if n >= 100 {
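+ // `unsigned_abs()` of an `i8` is at most 128, so if a hundreds digit
+ // exists it can only be `1`.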
+ buf.push('1');
+ n -= 100;
+ }
+ buf.push((b'0' + n / 10) as char);
+ n %= 10;
+ }
+ buf.push((b'0' + n) as char);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "str_to_string_specialization", since = "1.9.0")]
+impl ToString for str {
+ #[inline]
+ fn to_string(&self) -> String {
+ String::from(self)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_to_string_specialization", since = "1.17.0")]
+impl ToString for Cow<'_, str> {
+ #[inline]
+ fn to_string(&self) -> String {
+ self[..].to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_to_string_specialization", since = "1.17.0")]
+impl ToString for String {
+ #[inline]
+ fn to_string(&self) -> String {
+ self.to_owned()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<str> for String {
+ #[inline]
+ fn as_ref(&self) -> &str {
+ self
+ }
+}
+
+#[stable(feature = "string_as_mut", since = "1.43.0")]
+impl AsMut<str> for String {
+ #[inline]
+ fn as_mut(&mut self) -> &mut str {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<[u8]> for String {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.as_bytes()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<&str> for String {
+ /// Converts a `&str` into a [`String`].
+ ///
+ /// The result is allocated on the heap.
+ #[inline]
+ fn from(s: &str) -> String {
+ s.to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_mut_str_for_string", since = "1.44.0")]
+impl From<&mut str> for String {
+ /// Converts a `&mut str` into a [`String`].
+ ///
+ /// The result is allocated on the heap.
+ #[inline]
+ fn from(s: &mut str) -> String {
+ s.to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_ref_string", since = "1.35.0")]
+impl From<&String> for String {
+ /// Converts a `&String` into a [`String`].
+ ///
+ /// This clones `s` and returns the clone.
+ #[inline]
+ fn from(s: &String) -> String {
+ s.clone()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "string_from_box", since = "1.18.0")]
+impl From<Box<str>> for String {
+ /// Converts the given boxed `str` slice to a [`String`].
+ /// Note that the `str` slice is owned, so its buffer is reused rather than copied.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1: String = String::from("hello world");
+ /// let s2: Box<str> = s1.into_boxed_str();
+ /// let s3: String = String::from(s2);
+ ///
+ /// assert_eq!("hello world", s3)
+ /// ```
+ fn from(s: Box<str>) -> String {
+ s.into_string()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_str", since = "1.20.0")]
+impl From<String> for Box<str> {
+ /// Converts the given [`String`] to a boxed `str` slice that is owned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1: String = String::from("hello world");
+ /// let s2: Box<str> = Box::from(s1);
+ /// let s3: String = String::from(s2);
+ ///
+ /// assert_eq!("hello world", s3)
+ /// ```
+ fn from(s: String) -> Box<str> {
+ s.into_boxed_str()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_from_cow_str", since = "1.14.0")]
+impl<'a> From<Cow<'a, str>> for String {
+ /// Converts a clone-on-write string to an owned
+ /// instance of [`String`].
+ ///
+ /// This extracts the owned string if the `Cow` already owns one,
+ /// and clones the borrowed string slice otherwise.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// // If the string is not owned...
+ /// let cow: Cow<str> = Cow::Borrowed("eggplant");
+ /// // It will allocate on the heap and copy the string.
+ /// let owned: String = String::from(cow);
+ /// assert_eq!(&owned[..], "eggplant");
+ /// ```
+ fn from(s: Cow<'a, str>) -> String {
+ s.into_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<&'a str> for Cow<'a, str> {
+ /// Converts a string slice into a [`Borrowed`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// assert_eq!(Cow::from("eggplant"), Cow::Borrowed("eggplant"));
+ /// ```
+ ///
+ /// [`Borrowed`]: crate::borrow::Cow::Borrowed "borrow::Cow::Borrowed"
+ #[inline]
+ fn from(s: &'a str) -> Cow<'a, str> {
+ Cow::Borrowed(s)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<String> for Cow<'a, str> {
+ /// Converts a [`String`] into an [`Owned`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let s = "eggplant".to_string();
+ /// let s2 = "eggplant".to_string();
+ /// assert_eq!(Cow::from(s), Cow::<'static, str>::Owned(s2));
+ /// ```
+ ///
+ /// [`Owned`]: crate::borrow::Cow::Owned "borrow::Cow::Owned"
+ #[inline]
+ fn from(s: String) -> Cow<'a, str> {
+ Cow::Owned(s)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_from_string_ref", since = "1.28.0")]
+impl<'a> From<&'a String> for Cow<'a, str> {
+ /// Converts a [`String`] reference into a [`Borrowed`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let s = "eggplant".to_string();
+ /// assert_eq!(Cow::from(&s), Cow::Borrowed("eggplant"));
+ /// ```
+ ///
+ /// [`Borrowed`]: crate::borrow::Cow::Borrowed "borrow::Cow::Borrowed"
+ #[inline]
+ fn from(s: &'a String) -> Cow<'a, str> {
+ Cow::Borrowed(s.as_str())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a> FromIterator<char> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = char>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a, 'b> FromIterator<&'b str> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = &'b str>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a> FromIterator<String> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = String>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[stable(feature = "from_string_for_vec_u8", since = "1.14.0")]
+impl From<String> for Vec<u8> {
+ /// Converts the given [`String`] to a vector [`Vec`] that holds values of type [`u8`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1 = String::from("hello world");
+ /// let v1 = Vec::from(s1);
+ ///
+ /// for b in v1 {
+ /// println!("{b}");
+ /// }
+ /// ```
+ fn from(string: String) -> Vec<u8> {
+ string.into_bytes()
+ }
+}
+
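+/// Writing through the `fmt::Write` trait appends the formatted text to the
+/// `String`; a brief illustration:
+///
+/// ```
+/// use std::fmt::Write;
+///
+/// let mut s = String::new();
+/// write!(s, "{} + {} = {}", 1, 2, 3).unwrap();
+/// assert_eq!(s, "1 + 2 = 3");
+/// ```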
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Write for String {
+ #[inline]
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ self.push_str(s);
+ Ok(())
+ }
+
+ #[inline]
+ fn write_char(&mut self, c: char) -> fmt::Result {
+ self.push(c);
+ Ok(())
+ }
+}
+
+/// A draining iterator for `String`.
+///
+/// This struct is created by the [`drain`] method on [`String`]. See its
+/// documentation for more.
+///
+/// [`drain`]: String::drain
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<'a> {
+ /// Will be used as &'a mut String in the destructor
+ string: *mut String,
+ /// Start of part to remove
+ start: usize,
+ /// End of part to remove
+ end: usize,
+ /// Current remaining range to remove
+ iter: Chars<'a>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl fmt::Debug for Drain<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain").field(&self.as_str()).finish()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl Sync for Drain<'_> {}
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl Send for Drain<'_> {}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl Drop for Drain<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ // Use Vec::drain. "Reaffirm" the bounds checks to avoid
+ // panic code being inserted again.
+ let self_vec = (*self.string).as_mut_vec();
+ if self.start <= self.end && self.end <= self_vec.len() {
+ self_vec.drain(self.start..self.end);
+ }
+ }
+ }
+}
+
+impl<'a> Drain<'a> {
+ /// Returns the remaining (sub)string of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("abc");
+ /// let mut drain = s.drain(..);
+ /// assert_eq!(drain.as_str(), "abc");
+ /// let _ = drain.next().unwrap();
+ /// assert_eq!(drain.as_str(), "bc");
+ /// ```
+ #[must_use]
+ #[stable(feature = "string_drain_as_str", since = "1.55.0")]
+ pub fn as_str(&self) -> &str {
+ self.iter.as_str()
+ }
+}
+
+#[stable(feature = "string_drain_as_str", since = "1.55.0")]
+impl<'a> AsRef<str> for Drain<'a> {
+ fn as_ref(&self) -> &str {
+ self.as_str()
+ }
+}
+
+#[stable(feature = "string_drain_as_str", since = "1.55.0")]
+impl<'a> AsRef<[u8]> for Drain<'a> {
+ fn as_ref(&self) -> &[u8] {
+ self.as_str().as_bytes()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl Iterator for Drain<'_> {
+ type Item = char;
+
+ #[inline]
+ fn next(&mut self) -> Option<char> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<char> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl DoubleEndedIterator for Drain<'_> {
+ #[inline]
+ fn next_back(&mut self) -> Option<char> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for Drain<'_> {}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_char_for_string", since = "1.46.0")]
+impl From<char> for String {
+ /// Allocates an owned [`String`] from a single character.
+ ///
+ /// # Example
+ /// ```rust
+ /// let c: char = 'a';
+ /// let s: String = String::from(c);
+ /// assert_eq!("a", &s[..]);
+ /// ```
+ #[inline]
+ fn from(c: char) -> Self {
+ c.to_string()
+ }
+}
diff --git a/rust/alloc/vec/drain.rs b/rust/alloc/vec/drain.rs
new file mode 100644
index 000000000000..b6a5f98e4fcd
--- /dev/null
+++ b/rust/alloc/vec/drain.rs
@@ -0,0 +1,186 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::{Allocator, Global};
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen};
+use core::mem;
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+use super::Vec;
+
+/// A draining iterator for `Vec<T>`.
+///
+/// This `struct` is created by [`Vec::drain`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::Drain<_> = v.drain(..);
+/// ```
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
+> {
+ /// Index of tail to preserve
+ pub(super) tail_start: usize,
+ /// Length of tail
+ pub(super) tail_len: usize,
+ /// Current remaining range to remove
+ pub(super) iter: slice::Iter<'a, T>,
+ pub(super) vec: NonNull<Vec<T, A>>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
+ }
+}
+
+impl<'a, T, A: Allocator> Drain<'a, T, A> {
+ /// Returns the remaining items of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
+ /// let _ = drain.next().unwrap();
+ /// assert_eq!(drain.as_slice(), &['b', 'c']);
+ /// ```
+ #[must_use]
+ #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+ pub fn as_slice(&self) -> &[T] {
+ self.iter.as_slice()
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[must_use]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ unsafe { self.vec.as_ref().allocator() }
+ }
+}
+
+#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) })
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Drop for Drain<'_, T, A> {
+ fn drop(&mut self) {
+ /// Moves back the un-`Drain`ed elements to restore the original `Vec`.
+ struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
+
+ impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
+ fn drop(&mut self) {
+ if self.0.tail_len > 0 {
+ unsafe {
+ let source_vec = self.0.vec.as_mut();
+ // memmove back untouched tail, update to new length
+ let start = source_vec.len();
+ let tail = self.0.tail_start;
+ if tail != start {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = source_vec.as_mut_ptr().add(start);
+ ptr::copy(src, dst, self.0.tail_len);
+ }
+ source_vec.set_len(start + self.0.tail_len);
+ }
+ }
+ }
+ }
+
+ let iter = mem::replace(&mut self.iter, (&mut []).iter());
+ let drop_len = iter.len();
+
+ let mut vec = self.vec;
+
+ if mem::size_of::<T>() == 0 {
+ // ZSTs have no identity, so we don't need to move them around; we only need to drop the correct amount.
+ // This can be achieved by manipulating the Vec length instead of moving values out from `iter`.
+ unsafe {
+ let vec = vec.as_mut();
+ let old_len = vec.len();
+ vec.set_len(old_len + drop_len + self.tail_len);
+ vec.truncate(old_len + self.tail_len);
+ }
+
+ return;
+ }
+
+ // ensure elements are moved back into their appropriate places, even when drop_in_place panics
+ let _guard = DropGuard(self);
+
+ if drop_len == 0 {
+ return;
+ }
+
+ // as_slice() must only be called when iter.len() is > 0 because
+ // vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate
+ // the iterator's internal pointers. Creating a reference to deallocated memory
+ // is invalid even when it is zero-length
+ let drop_ptr = iter.as_slice().as_ptr();
+
+ unsafe {
+ // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
+ // a pointer with mutable provenance is necessary. Therefore we must reconstruct
+ // it from the original vec but also avoid creating a &mut to the front since that could
+ // invalidate raw pointers to it which some unsafe code might rely on.
+ let vec_ptr = vec.as_mut().as_mut_ptr();
+ let drop_offset = drop_ptr.sub_ptr(vec_ptr);
+ let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
+ ptr::drop_in_place(to_drop);
+ }
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
diff --git a/rust/alloc/vec/drain_filter.rs b/rust/alloc/vec/drain_filter.rs
new file mode 100644
index 000000000000..b04fce041622
--- /dev/null
+++ b/rust/alloc/vec/drain_filter.rs
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::{Allocator, Global};
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::Vec;
+
+/// An iterator which uses a closure to determine if an element should be removed.
+///
+/// This struct is created by [`Vec::drain_filter`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// #![feature(drain_filter)]
+///
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
+/// ```
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+#[derive(Debug)]
+pub struct DrainFilter<
+ 'a,
+ T,
+ F,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> where
+ F: FnMut(&mut T) -> bool,
+{
+ pub(super) vec: &'a mut Vec<T, A>,
+ /// The index of the item that will be inspected by the next call to `next`.
+ pub(super) idx: usize,
+ /// The number of items that have been drained (removed) thus far.
+ pub(super) del: usize,
+ /// The original length of `vec` prior to draining.
+ pub(super) old_len: usize,
+ /// The filter test predicate.
+ pub(super) pred: F,
+ /// A flag that indicates a panic has occurred in the filter test predicate.
+ /// This is used as a hint in the drop implementation to prevent consumption
+ /// of the remainder of the `DrainFilter`. Any unprocessed items will be
+ /// backshifted in the `vec`, but no further items will be dropped or
+ /// tested by the filter predicate.
+ pub(super) panic_flag: bool,
+}
+
+impl<T, F, A: Allocator> DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.vec.allocator()
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ unsafe {
+ while self.idx < self.old_len {
+ let i = self.idx;
+ let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
+ self.panic_flag = true;
+ let drained = (self.pred)(&mut v[i]);
+ self.panic_flag = false;
+ // Update the index *after* the predicate is called. If the index
+ // is updated prior and the predicate panics, the element at this
+ // index would be leaked.
+ self.idx += 1;
+ if drained {
+ self.del += 1;
+ return Some(ptr::read(&v[i]));
+ } else if self.del > 0 {
+ let del = self.del;
+ let src: *const T = &v[i];
+ let dst: *mut T = &mut v[i - del];
+ ptr::copy_nonoverlapping(src, dst, 1);
+ }
+ }
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.old_len - self.idx))
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ fn drop(&mut self) {
+ struct BackshiftOnDrop<'a, 'b, T, F, A: Allocator>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ drain: &'b mut DrainFilter<'a, T, F, A>,
+ }
+
+ impl<'a, 'b, T, F, A: Allocator> Drop for BackshiftOnDrop<'a, 'b, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ fn drop(&mut self) {
+ unsafe {
+ if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
+ // This is a pretty messed up state, and there isn't really an
+ // obviously right thing to do. We don't want to keep trying
+ // to execute `pred`, so we just backshift all the unprocessed
+ // elements and tell the vec that they still exist. The backshift
+ // is required to prevent a double-drop of the last successfully
+ // drained item prior to a panic in the predicate.
+ let ptr = self.drain.vec.as_mut_ptr();
+ let src = ptr.add(self.drain.idx);
+ let dst = src.sub(self.drain.del);
+ let tail_len = self.drain.old_len - self.drain.idx;
+ src.copy_to(dst, tail_len);
+ }
+ self.drain.vec.set_len(self.drain.old_len - self.drain.del);
+ }
+ }
+ }
+
+ let backshift = BackshiftOnDrop { drain: self };
+
+ // Attempt to consume any remaining elements if the filter predicate
+ // has not yet panicked. We'll backshift any remaining elements
+ // whether we've already panicked or if the consumption here panics.
+ if !backshift.drain.panic_flag {
+ backshift.drain.for_each(drop);
+ }
+ }
+}
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
new file mode 100644
index 000000000000..09cfee0ae631
--- /dev/null
+++ b/rust/alloc/vec/into_iter.rs
@@ -0,0 +1,365 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+#[cfg(not(no_global_oom_handling))]
+use super::AsVecIntoIter;
+use crate::alloc::{Allocator, Global};
+use crate::raw_vec::RawVec;
+use core::fmt;
+use core::intrinsics::arith_offset;
+use core::iter::{
+ FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
+};
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop};
+#[cfg(not(no_global_oom_handling))]
+use core::ops::Deref;
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+/// An iterator that moves out of a vector.
+///
+/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
+/// (provided by the [`IntoIterator`] trait).
+///
+/// # Example
+///
+/// ```
+/// let v = vec![0, 1, 2];
+/// let iter: std::vec::IntoIter<_> = v.into_iter();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_insignificant_dtor]
+pub struct IntoIter<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ pub(super) buf: NonNull<T>,
+ pub(super) phantom: PhantomData<T>,
+ pub(super) cap: usize,
+ // the drop impl reconstructs a RawVec from buf, cap and alloc
+ // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
+ pub(super) alloc: ManuallyDrop<A>,
+ pub(super) ptr: *const T,
+ pub(super) end: *const T,
+}
+
+#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+ }
+}
+
+impl<T, A: Allocator> IntoIter<T, A> {
+ /// Returns the remaining items of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec = vec!['a', 'b', 'c'];
+ /// let mut into_iter = vec.into_iter();
+ /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ /// let _ = into_iter.next().unwrap();
+ /// assert_eq!(into_iter.as_slice(), &['b', 'c']);
+ /// ```
+ #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
+ pub fn as_slice(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.ptr, self.len()) }
+ }
+
+ /// Returns the remaining items of this iterator as a mutable slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec = vec!['a', 'b', 'c'];
+ /// let mut into_iter = vec.into_iter();
+ /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ /// into_iter.as_mut_slice()[2] = 'z';
+ /// assert_eq!(into_iter.next().unwrap(), 'a');
+ /// assert_eq!(into_iter.next().unwrap(), 'b');
+ /// assert_eq!(into_iter.next().unwrap(), 'z');
+ /// ```
+ #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ unsafe { &mut *self.as_raw_mut_slice() }
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ &self.alloc
+ }
+
+ fn as_raw_mut_slice(&mut self) -> *mut [T] {
+ ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
+ }
+
+ /// Drops remaining elements and relinquishes the backing allocation.
+ ///
+ /// This is roughly equivalent to the following, but more efficient
+ ///
+ /// ```
+ /// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
+ /// (&mut into_iter).for_each(core::mem::drop);
+ /// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
+ /// ```
+ ///
+ /// This method is used by in-place iteration, refer to the vec::in_place_collect
+ /// documentation for an overview.
+ #[cfg(not(no_global_oom_handling))]
+ pub(super) fn forget_allocation_drop_remaining(&mut self) {
+ let remaining = self.as_raw_mut_slice();
+
+ // overwrite the individual fields instead of creating a new
+ // struct and then overwriting &mut self.
+ // this creates less assembly
+ self.cap = 0;
+ self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) };
+ self.ptr = self.buf.as_ptr();
+ self.end = self.buf.as_ptr();
+
+ unsafe {
+ ptr::drop_in_place(remaining);
+ }
+ }
+
+ /// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed.
+ pub(crate) fn forget_remaining_elements(&mut self) {
+ self.ptr = self.end;
+ }
+}
+
+#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")]
+impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Iterator for IntoIter<T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ if self.ptr as *const _ == self.end {
+ None
+ } else if mem::size_of::<T>() == 0 {
+ // purposefully don't use 'ptr.offset' because for
+ // vectors with 0-size elements this would return the
+ // same pointer.
+ self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+
+ // Make up a value of this ZST.
+ Some(unsafe { mem::zeroed() })
+ } else {
+ let old = self.ptr;
+ self.ptr = unsafe { self.ptr.offset(1) };
+
+ Some(unsafe { ptr::read(old) })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = if mem::size_of::<T>() == 0 {
+ self.end.addr().wrapping_sub(self.ptr.addr())
+ } else {
+ unsafe { self.end.sub_ptr(self.ptr) }
+ };
+ (exact, Some(exact))
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let step_size = self.len().min(n);
+ let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
+ if mem::size_of::<T>() == 0 {
+ // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
+ // effectively results in unsigned pointers representing positions 0..usize::MAX,
+ // which is valid for ZSTs.
+ self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+ } else {
+ // SAFETY: the min() above ensures that step_size is in bounds
+ self.ptr = unsafe { self.ptr.add(step_size) };
+ }
+ // SAFETY: the min() above ensures that step_size is in bounds
+ unsafe {
+ ptr::drop_in_place(to_drop);
+ }
+ if step_size < n {
+ return Err(step_size);
+ }
+ Ok(())
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ // SAFETY: the caller must guarantee that `i` is in bounds of the
+ // `Vec<T>`, so `i` cannot overflow an `isize`, and the `self.ptr.add(i)`
+ // is guaranteed to point to an element of the `Vec<T>` and
+ // thus guaranteed to be valid to dereference.
+ //
+ // Also note the implementation of `Self: TrustedRandomAccess` requires
+ // that `T: Copy` so reading elements from the buffer doesn't invalidate
+ // them for `Drop`.
+ unsafe {
+ if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ if self.end == self.ptr {
+ None
+ } else if mem::size_of::<T>() == 0 {
+ // See above for why 'ptr.offset' isn't used
+ self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+
+ // Make up a value of this ZST.
+ Some(unsafe { mem::zeroed() })
+ } else {
+ self.end = unsafe { self.end.offset(-1) };
+
+ Some(unsafe { ptr::read(self.end) })
+ }
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let step_size = self.len().min(n);
+ if mem::size_of::<T>() == 0 {
+ // SAFETY: same as for advance_by()
+ self.end = unsafe {
+ arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
+ }
+ } else {
+ // SAFETY: same as for advance_by()
+ self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+ }
+ let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
+ // SAFETY: same as for advance_by()
+ unsafe {
+ ptr::drop_in_place(to_drop);
+ }
+ if step_size < n {
+ return Err(step_size);
+ }
+ Ok(())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
+ fn is_empty(&self) -> bool {
+ self.ptr == self.end
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
+
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "std_internals")]
+#[rustc_unsafe_specialization_marker]
+pub trait NonDrop {}
+
+// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr
+// and thus we can't implement drop-handling
+#[unstable(issue = "none", feature = "std_internals")]
+impl<T: Copy> NonDrop for T {}
+
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "std_internals")]
+// TrustedRandomAccess (without NoCoerce) must not be implemented because
+// subtypes/supertypes of `T` might not be `NonDrop`
+unsafe impl<T, A: Allocator> TrustedRandomAccessNoCoerce for IntoIter<T, A>
+where
+ T: NonDrop,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_into_iter_clone", since = "1.8.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
+ #[cfg(not(test))]
+ fn clone(&self) -> Self {
+ self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter()
+ }
+ #[cfg(test)]
+ fn clone(&self) -> Self {
+ crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
+ fn drop(&mut self) {
+ struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);
+
+ impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
+ fn drop(&mut self) {
+ unsafe {
+ // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
+ let alloc = ManuallyDrop::take(&mut self.0.alloc);
+ // RawVec handles deallocation
+ let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
+ }
+ }
+ }
+
+ let guard = DropGuard(self);
+ // destroy the remaining elements
+ unsafe {
+ ptr::drop_in_place(guard.0.as_raw_mut_slice());
+ }
+ // now `guard` will be dropped and do the rest
+ }
+}
+
+// In addition to the SAFETY invariants of the following three unsafe traits
+// also refer to the vec::in_place_collect module documentation to get an overview
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
+ type Source = Self;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut Self::Source {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+unsafe impl<T> AsVecIntoIter for IntoIter<T> {
+ type Item = T;
+
+ fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> {
+ self
+ }
+}
diff --git a/rust/alloc/vec/is_zero.rs b/rust/alloc/vec/is_zero.rs
new file mode 100644
index 000000000000..377f3d172777
--- /dev/null
+++ b/rust/alloc/vec/is_zero.rs
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::boxed::Box;
+
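+// This specialization trait lets constructors such as `vec![0; n]` recognize
+// elements whose byte representation is all zeros, so the buffer can be
+// obtained from a zero-initialized allocation instead of writing each element.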
+#[rustc_specialization_trait]
+pub(super) unsafe trait IsZero {
+ /// Whether this value's representation is all zeros
+ fn is_zero(&self) -> bool;
+}
+
+macro_rules! impl_is_zero {
+ ($t:ty, $is_zero:expr) => {
+ unsafe impl IsZero for $t {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ $is_zero(*self)
+ }
+ }
+ };
+}
+
+impl_is_zero!(i16, |x| x == 0);
+impl_is_zero!(i32, |x| x == 0);
+impl_is_zero!(i64, |x| x == 0);
+impl_is_zero!(i128, |x| x == 0);
+impl_is_zero!(isize, |x| x == 0);
+
+impl_is_zero!(u16, |x| x == 0);
+impl_is_zero!(u32, |x| x == 0);
+impl_is_zero!(u64, |x| x == 0);
+impl_is_zero!(u128, |x| x == 0);
+impl_is_zero!(usize, |x| x == 0);
+
+impl_is_zero!(bool, |x| x == false);
+impl_is_zero!(char, |x| x == '\0');
+
+impl_is_zero!(f32, |x: f32| x.to_bits() == 0);
+impl_is_zero!(f64, |x: f64| x.to_bits() == 0);
+
+unsafe impl<T> IsZero for *const T {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ (*self).is_null()
+ }
+}
+
+unsafe impl<T> IsZero for *mut T {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ (*self).is_null()
+ }
+}
+
+unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ // Because this is generated as a runtime check, it's not obvious that
+ // it's worth doing if the array is really long. The threshold here
+ // is largely arbitrary, but was picked because as of 2022-05-01 LLVM
+ // can const-fold the check in `vec![[0; 32]; n]` but not in
+ // `vec![[0; 64]; n]`: https://godbolt.org/z/WTzjzfs5b
+ // Feel free to tweak if you have better evidence.
+
+ N <= 32 && self.iter().all(IsZero::is_zero)
+ }
+}
+
+// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
+// For fat pointers, the bytes that would be the pointer metadata in the `Some`
+// variant are padding in the `None` variant, so ignoring them and
+// zero-initializing instead is ok.
+// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
+// `SpecFromElem`.
+
+unsafe impl<T: ?Sized> IsZero for Option<&T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+}
+
+unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+}
+
+// `Option<num::NonZeroU32>` and similar have a representation guarantee that
+// they're the same size as the corresponding `u32` type, as well as a guarantee
+// that transmuting between `NonZeroU32` and `Option<num::NonZeroU32>` works.
+// While the documentation officially makes it UB to transmute from `None`,
+// we're the standard library so we can make extra inferences, and we know that
+// the only niche available to represent `None` is the one that's all zeros.
+
+macro_rules! impl_is_zero_option_of_nonzero {
+ ($($t:ident,)+) => {$(
+ unsafe impl IsZero for Option<core::num::$t> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+ }
+ )+};
+}
+
+impl_is_zero_option_of_nonzero!(
+ NonZeroU8,
+ NonZeroU16,
+ NonZeroU32,
+ NonZeroU64,
+ NonZeroU128,
+ NonZeroI8,
+ NonZeroI16,
+ NonZeroI32,
+ NonZeroI64,
+ NonZeroI128,
+ NonZeroUsize,
+ NonZeroIsize,
+);
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
new file mode 100644
index 000000000000..4ae81b890fd9
--- /dev/null
+++ b/rust/alloc/vec/mod.rs
@@ -0,0 +1,3420 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! A contiguous growable array type with heap-allocated contents, written
+//! `Vec<T>`.
+//!
+//! Vectors have *O*(1) indexing, amortized *O*(1) push (to the end) and
+//! *O*(1) pop (from the end).
+//!
+//! Vectors ensure they never allocate more than `isize::MAX` bytes.
+//!
+//! # Examples
+//!
+//! You can explicitly create a [`Vec`] with [`Vec::new`]:
+//!
+//! ```
+//! let v: Vec<i32> = Vec::new();
+//! ```
+//!
+//! ...or by using the [`vec!`] macro:
+//!
+//! ```
+//! let v: Vec<i32> = vec![];
+//!
+//! let v = vec![1, 2, 3, 4, 5];
+//!
+//! let v = vec![0; 10]; // ten zeroes
+//! ```
+//!
+//! You can [`push`] values onto the end of a vector (which will grow the vector
+//! as needed):
+//!
+//! ```
+//! let mut v = vec![1, 2];
+//!
+//! v.push(3);
+//! ```
+//!
+//! Popping values works in much the same way:
+//!
+//! ```
+//! let mut v = vec![1, 2];
+//!
+//! let two = v.pop();
+//! ```
+//!
+//! Vectors also support indexing (through the [`Index`] and [`IndexMut`] traits):
+//!
+//! ```
+//! let mut v = vec![1, 2, 3];
+//! let three = v[2];
+//! v[1] = v[1] + 5;
+//! ```
+//!
+//! [`push`]: Vec::push
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+use core::cmp;
+use core::cmp::Ordering;
+use core::convert::TryFrom;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::intrinsics::{arith_offset, assume};
+use core::iter;
+#[cfg(not(no_global_oom_handling))]
+use core::iter::FromIterator;
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::{self, Index, IndexMut, Range, RangeBounds};
+use core::ptr::{self, NonNull};
+use core::slice::{self, SliceIndex};
+
+use crate::alloc::{Allocator, Global};
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::raw_vec::RawVec;
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+pub use self::drain_filter::DrainFilter;
+
+mod drain_filter;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_splice", since = "1.21.0")]
+pub use self::splice::Splice;
+
+#[cfg(not(no_global_oom_handling))]
+mod splice;
+
+#[stable(feature = "drain", since = "1.6.0")]
+pub use self::drain::Drain;
+
+mod drain;
+
+#[cfg(not(no_global_oom_handling))]
+mod cow;
+
+#[cfg(not(no_global_oom_handling))]
+pub(crate) use self::in_place_collect::AsVecIntoIter;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::into_iter::IntoIter;
+
+mod into_iter;
+
+#[cfg(not(no_global_oom_handling))]
+use self::is_zero::IsZero;
+
+mod is_zero;
+
+#[cfg(not(no_global_oom_handling))]
+mod in_place_collect;
+
+mod partial_eq;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_elem::SpecFromElem;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_elem;
+
+use self::set_len_on_drop::SetLenOnDrop;
+
+mod set_len_on_drop;
+
+#[cfg(not(no_global_oom_handling))]
+use self::in_place_drop::InPlaceDrop;
+
+#[cfg(not(no_global_oom_handling))]
+mod in_place_drop;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_iter_nested::SpecFromIterNested;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_iter_nested;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_iter::SpecFromIter;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_iter;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_extend::SpecExtend;
+
+use self::spec_extend::TrySpecExtend;
+
+mod spec_extend;
+
+/// A contiguous growable array type, written as `Vec<T>`, short for 'vector'.
+///
+/// # Examples
+///
+/// ```
+/// let mut vec = Vec::new();
+/// vec.push(1);
+/// vec.push(2);
+///
+/// assert_eq!(vec.len(), 2);
+/// assert_eq!(vec[0], 1);
+///
+/// assert_eq!(vec.pop(), Some(2));
+/// assert_eq!(vec.len(), 1);
+///
+/// vec[0] = 7;
+/// assert_eq!(vec[0], 7);
+///
+/// vec.extend([1, 2, 3].iter().copied());
+///
+/// for x in &vec {
+/// println!("{x}");
+/// }
+/// assert_eq!(vec, [7, 1, 2, 3]);
+/// ```
+///
+/// The [`vec!`] macro is provided for convenient initialization:
+///
+/// ```
+/// let mut vec1 = vec![1, 2, 3];
+/// vec1.push(4);
+/// let vec2 = Vec::from([1, 2, 3, 4]);
+/// assert_eq!(vec1, vec2);
+/// ```
+///
+/// It can also initialize each element of a `Vec<T>` with a given value.
+/// This may be more efficient than performing allocation and initialization
+/// in separate steps, especially when initializing a vector of zeros:
+///
+/// ```
+/// let vec = vec![0; 5];
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+///
+/// // The following is equivalent, but potentially slower:
+/// let mut vec = Vec::with_capacity(5);
+/// vec.resize(5, 0);
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+/// ```
+///
+/// For more information, see
+/// [Capacity and Reallocation](#capacity-and-reallocation).
+///
+/// Use a `Vec<T>` as an efficient stack:
+///
+/// ```
+/// let mut stack = Vec::new();
+///
+/// stack.push(1);
+/// stack.push(2);
+/// stack.push(3);
+///
+/// while let Some(top) = stack.pop() {
+/// // Prints 3, 2, 1
+/// println!("{top}");
+/// }
+/// ```
+///
+/// # Indexing
+///
+/// The `Vec` type allows access to values by index, because it implements the
+/// [`Index`] trait. An example will make this more explicit:
+///
+/// ```
+/// let v = vec![0, 2, 4, 6];
+/// println!("{}", v[1]); // it will display '2'
+/// ```
+///
+/// However be careful: if you try to access an index which isn't in the `Vec`,
+/// your software will panic! You cannot do this:
+///
+/// ```should_panic
+/// let v = vec![0, 2, 4, 6];
+/// println!("{}", v[6]); // it will panic!
+/// ```
+///
+/// Use [`get`] and [`get_mut`] if you want to check whether the index is in
+/// the `Vec`.
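+///
+/// For example, [`get`] returns an `Option` instead of panicking:
+///
+/// ```
+/// let v = vec![0, 2, 4, 6];
+/// assert_eq!(v.get(1), Some(&2));
+/// assert_eq!(v.get(6), None); // out of bounds, but no panic
+/// ```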
+///
+/// # Slicing
+///
+/// A `Vec` can be mutable. On the other hand, slices are read-only objects.
+/// To get a [slice][prim@slice], use [`&`]. Example:
+///
+/// ```
+/// fn read_slice(slice: &[usize]) {
+/// // ...
+/// }
+///
+/// let v = vec![0, 1];
+/// read_slice(&v);
+///
+/// // ... and that's all!
+/// // you can also do it like this:
+/// let u: &[usize] = &v;
+/// // or like this:
+/// let u: &[_] = &v;
+/// ```
+///
+/// In Rust, it's more common to pass slices as arguments rather than vectors
+/// when you just want to provide read access. The same goes for [`String`] and
+/// [`&str`].
+///
+/// # Capacity and reallocation
+///
+/// The capacity of a vector is the amount of space allocated for any future
+/// elements that will be added onto the vector. This is not to be confused with
+/// the *length* of a vector, which specifies the number of actual elements
+/// within the vector. If a vector's length exceeds its capacity, its capacity
+/// will automatically be increased, but its elements will have to be
+/// reallocated.
+///
+/// For example, a vector with capacity 10 and length 0 would be an empty vector
+/// with space for 10 more elements. Pushing 10 or fewer elements onto the
+/// vector will not change its capacity or cause reallocation to occur. However,
+/// if the vector's length is increased to 11, it will have to reallocate, which
+/// can be slow. For this reason, it is recommended to use [`Vec::with_capacity`]
+/// whenever possible to specify how big the vector is expected to get.
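+///
+/// For instance, reserving capacity up front changes [`capacity`] but not [`len`]:
+///
+/// ```
+/// let mut v: Vec<i32> = Vec::with_capacity(10);
+/// assert_eq!(v.len(), 0);
+/// assert_eq!(v.capacity(), 10);
+///
+/// v.push(1);
+/// assert_eq!(v.len(), 1);
+/// assert_eq!(v.capacity(), 10); // no reallocation was needed
+/// ```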
+///
+/// # Guarantees
+///
+/// Due to its incredibly fundamental nature, `Vec` makes a lot of guarantees
+/// about its design. This ensures that it's as low-overhead as possible in
+/// the general case, and can be correctly manipulated in primitive ways
+/// by unsafe code. Note that these guarantees refer to an unqualified `Vec<T>`.
+/// If additional type parameters are added (e.g., to support custom allocators),
+/// overriding their defaults may change the behavior.
+///
+/// Most fundamentally, `Vec` is and always will be a (pointer, capacity, length)
+/// triplet. No more, no less. The order of these fields is completely
+/// unspecified, and you should use the appropriate methods to modify these.
+/// The pointer will never be null, so this type is null-pointer-optimized.
+///
+/// However, the pointer might not actually point to allocated memory. In particular,
+/// if you construct a `Vec` with capacity 0 via [`Vec::new`], [`vec![]`][`vec!`],
+/// [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit`]
+/// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
+/// types inside a `Vec`, it will not allocate space for them. *Note that in this case
+/// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only
+/// if <code>[mem::size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
+/// details are very subtle --- if you intend to allocate memory using a `Vec`
+/// and use it for something else (either to pass to unsafe code, or to build your
+/// own memory-backed collection), be sure to deallocate this memory by using
+/// `from_raw_parts` to recover the `Vec` and then dropping it.
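+///
+/// A small sketch of both cases (the exact capacity reported for zero-sized
+/// element types is an implementation detail; currently it is `usize::MAX`):
+///
+/// ```
+/// let v: Vec<i32> = Vec::new();
+/// assert_eq!(v.capacity(), 0); // no allocation has happened yet
+///
+/// let z = vec![(); 8];
+/// assert_eq!(z.len(), 8);
+/// assert_eq!(z.capacity(), usize::MAX); // no allocation is made for zero-sized elements
+/// ```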
+///
+/// If a `Vec` *has* allocated memory, then the memory it points to is on the heap
+/// (as defined by the allocator Rust is configured to use by default), and its
+/// pointer points to [`len`] initialized, contiguous elements in order (what
+/// you would see if you coerced it to a slice), followed by <code>[capacity] - [len]</code>
+/// logically uninitialized, contiguous elements.
+///
+/// A vector containing the elements `'a'` and `'b'` with capacity 4 can be
+/// visualized as below. The top part is the `Vec` struct, it contains a
+/// pointer to the head of the allocation in the heap, length and capacity.
+/// The bottom part is the allocation on the heap, a contiguous memory block.
+///
+/// ```text
+/// ptr len capacity
+/// +--------+--------+--------+
+/// | 0x0123 | 2 | 4 |
+/// +--------+--------+--------+
+/// |
+/// v
+/// Heap +--------+--------+--------+--------+
+/// | 'a' | 'b' | uninit | uninit |
+/// +--------+--------+--------+--------+
+/// ```
+///
+/// - **uninit** represents memory that is not initialized, see [`MaybeUninit`].
+/// - Note: the ABI is not stable and `Vec` makes no guarantees about its memory
+/// layout (including the order of fields).
+///
+/// `Vec` will never perform a "small optimization" where elements are actually
+/// stored on the stack for two reasons:
+///
+/// * It would make it more difficult for unsafe code to correctly manipulate
+/// a `Vec`. The contents of a `Vec` wouldn't have a stable address if it were
+/// only moved, and it would be more difficult to determine if a `Vec` had
+/// actually allocated memory.
+///
+/// * It would penalize the general case, incurring an additional branch
+/// on every access.
+///
+/// `Vec` will never automatically shrink itself, even if completely empty. This
+/// ensures no unnecessary allocations or deallocations occur. Emptying a `Vec`
+/// and then filling it back up to the same [`len`] should incur no calls to
+/// the allocator. If you wish to free up unused memory, use
+/// [`shrink_to_fit`] or [`shrink_to`].
+///
+/// [`push`] and [`insert`] will never (re)allocate if the reported capacity is
+/// sufficient. [`push`] and [`insert`] *will* (re)allocate if
+/// <code>[len] == [capacity]</code>. That is, the reported capacity is completely
+/// accurate, and can be relied on. It can even be used to manually free the memory
+/// allocated by a `Vec` if desired. Bulk insertion methods *may* reallocate, even
+/// when not necessary.
+///
+/// `Vec` does not guarantee any particular growth strategy when reallocating
+/// when full, nor when [`reserve`] is called. The current strategy is basic
+/// and it may prove desirable to use a non-constant growth factor. Whatever
+/// strategy is used will of course guarantee *O*(1) amortized [`push`].
+///
+/// `vec![x; n]`, `vec![a, b, c, d]`, and
+/// [`Vec::with_capacity(n)`][`Vec::with_capacity`] will all produce a `Vec`
+/// with exactly the requested capacity. If <code>[len] == [capacity]</code>
+/// (as is the case for the [`vec!`] macro), then a `Vec<T>` can be converted
+/// to and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements.
+///
+/// `Vec` will not specifically overwrite any data that is removed from it,
+/// but also won't specifically preserve it. Its uninitialized memory is
+/// scratch space that it may use however it wants. It will generally just do
+/// whatever is most efficient or otherwise easy to implement. Do not rely on
+/// removed data to be erased for security purposes. Even if you drop a `Vec`, its
+/// buffer may simply be reused by another allocation. Even if you zero a `Vec`'s memory
+/// first, that might not actually happen because the optimizer does not consider
+/// this a side-effect that must be preserved. There is one case which we will
+/// not break, however: using `unsafe` code to write to the excess capacity,
+/// and then increasing the length to match, is always valid.
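+///
+/// A minimal sketch of that guaranteed pattern:
+///
+/// ```
+/// let mut v: Vec<u8> = Vec::with_capacity(3);
+/// v.push(1);
+/// unsafe {
+///     // Write into the excess capacity...
+///     v.as_mut_ptr().add(1).write(2);
+///     v.as_mut_ptr().add(2).write(3);
+///     // ...then make the new elements visible by growing the length to match.
+///     v.set_len(3);
+/// }
+/// assert_eq!(v, [1, 2, 3]);
+/// ```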
+///
+/// Currently, `Vec` does not guarantee the order in which elements are dropped.
+/// The order has changed in the past and may change again.
+///
+/// [`get`]: ../../std/vec/struct.Vec.html#method.get
+/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
+/// [`String`]: crate::string::String
+/// [`&str`]: type@str
+/// [`shrink_to_fit`]: Vec::shrink_to_fit
+/// [`shrink_to`]: Vec::shrink_to
+/// [capacity]: Vec::capacity
+/// [`capacity`]: Vec::capacity
+/// [mem::size_of::\<T>]: core::mem::size_of
+/// [len]: Vec::len
+/// [`len`]: Vec::len
+/// [`push`]: Vec::push
+/// [`insert`]: Vec::insert
+/// [`reserve`]: Vec::reserve
+/// [`MaybeUninit`]: core::mem::MaybeUninit
+/// [owned slice]: Box
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Vec")]
+#[rustc_insignificant_dtor]
+pub struct Vec<T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global> {
+ buf: RawVec<T, A>,
+ len: usize,
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Inherent methods
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Vec<T> {
+ /// Constructs a new, empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unused_mut)]
+ /// let mut vec: Vec<i32> = Vec::new();
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_vec_new", since = "1.39.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub const fn new() -> Self {
+ Vec { buf: RawVec::NEW, len: 0 }
+ }
+
+ /// Constructs a new, empty `Vec<T>` with the specified capacity.
+ ///
+ /// The vector will be able to hold exactly `capacity` elements without
+ /// reallocating. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// *capacity* specified, the vector will have a zero *length*. For an
+ /// explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::with_capacity_in(capacity, Global)
+ }
+
+ /// Tries to construct a new, empty `Vec<T>` with the specified capacity.
+ ///
+ /// The vector will be able to hold exactly `capacity` elements without
+ /// reallocating. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// *capacity* specified, the vector will have a zero *length*. For an
+ /// explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::try_with_capacity(10).unwrap();
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ ///
+ /// let result: Result<Vec<i32>, _> = Vec::try_with_capacity(usize::MAX);
+ /// assert!(result.is_err());
+ /// ```
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
+ Self::try_with_capacity_in(capacity, Global)
+ }
+
+ /// Creates a `Vec<T>` directly from the raw components of another vector.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
+ /// (at least, it's highly likely to be incorrect if it wasn't).
+ /// * `T` needs to have the same alignment as what `ptr` was allocated with.
+ /// (`T` having a less strict alignment is not sufficient, the alignment really
+ /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
+ /// allocated and deallocated with the same layout.)
+ /// * The size of `T` times the `capacity` (i.e. the allocated size in bytes) needs
+ /// to be the same size as the pointer was allocated with. (Because similar to
+ /// alignment, [`dealloc`] must be called with the same layout `size`.)
+ /// * `length` needs to be less than or equal to `capacity`.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example it is normally **not** safe
+ /// to build a `Vec<u8>` from a pointer to a C `char` array with length
+ /// `size_t`, doing so is only safe if the array was initially allocated by
+ /// a `Vec` or `String`.
+ /// It's also not safe to build one from a `Vec<u16>` and its length, because
+ /// the allocator cares about the alignment, and these two types have different
+ /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1. To avoid
+ /// these issues, it is often preferable to do casting/transmuting using
+ /// [`slice::from_raw_parts`] instead.
+ ///
+ /// The ownership of `ptr` is effectively transferred to the
+ /// `Vec<T>` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// [`String`]: crate::string::String
+ /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr;
+ /// use std::mem;
+ ///
+ /// let v = vec![1, 2, 3];
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent running `v`'s destructor so we are in complete control
+ /// // of the allocation.
+ /// let mut v = mem::ManuallyDrop::new(v);
+ ///
+ /// // Pull out the various important pieces of information about `v`
+ /// let p = v.as_mut_ptr();
+ /// let len = v.len();
+ /// let cap = v.capacity();
+ ///
+ /// unsafe {
+ /// // Overwrite memory with 4, 5, 6
+ /// for i in 0..len as isize {
+ /// ptr::write(p.offset(i), 4 + i);
+ /// }
+ ///
+ /// // Put everything back together into a Vec
+ /// let rebuilt = Vec::from_raw_parts(p, len, cap);
+ /// assert_eq!(rebuilt, [4, 5, 6]);
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
+ unsafe { Self::from_raw_parts_in(ptr, length, capacity, Global) }
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ /// Constructs a new, empty `Vec<T, A>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// # #[allow(unused_mut)]
+ /// let mut vec: Vec<i32, _> = Vec::new_in(System);
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub const fn new_in(alloc: A) -> Self {
+ Vec { buf: RawVec::new_in(alloc), len: 0 }
+ }
+
+ /// Constructs a new, empty `Vec<T, A>` with the specified capacity with the provided
+ /// allocator.
+ ///
+ /// The vector will be able to hold exactly `capacity` elements without
+ /// reallocating. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// *capacity* specified, the vector will have a zero *length*. For an
+ /// explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut vec = Vec::with_capacity_in(10, System);
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
+ }
+
+ /// Tries to construct a new, empty `Vec<T, A>` with the specified capacity
+ /// with the provided allocator.
+ ///
+ /// The vector will be able to hold exactly `capacity` elements without
+ /// reallocating. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// *capacity* specified, the vector will have a zero *length*. For an
+ /// explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut vec = Vec::try_with_capacity_in(10, System).unwrap();
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ ///
+ /// let result: Result<Vec<i32>, _> = Vec::try_with_capacity_in(usize::MAX, System);
+ /// assert!(result.is_err());
+ /// ```
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ Ok(Vec { buf: RawVec::try_with_capacity_in(capacity, alloc)?, len: 0 })
+ }
+
+ /// Creates a `Vec<T, A>` directly from the raw components of another vector.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
+ /// (at least, it's highly likely to be incorrect if it wasn't).
+ /// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
+ /// (`T` having a less strict alignment is not sufficient, the alignment really
+ /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
+ /// allocated and deallocated with the same layout.)
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example it is **not** safe
+ /// to build a `Vec<u8>` from a pointer to a C `char` array with length `size_t`.
+ /// It's also not safe to build one from a `Vec<u16>` and its length, because
+ /// the allocator cares about the alignment, and these two types have different
+ /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1.
+ ///
+ /// The ownership of `ptr` is effectively transferred to the
+ /// `Vec<T>` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// [`String`]: crate::string::String
+ /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// use std::ptr;
+ /// use std::mem;
+ ///
+ /// let mut v = Vec::with_capacity_in(3, System);
+ /// v.push(1);
+ /// v.push(2);
+ /// v.push(3);
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent running `v`'s destructor so we are in complete control
+ /// // of the allocation.
+ /// let mut v = mem::ManuallyDrop::new(v);
+ ///
+ /// // Pull out the various important pieces of information about `v`
+ /// let p = v.as_mut_ptr();
+ /// let len = v.len();
+ /// let cap = v.capacity();
+ /// let alloc = v.allocator();
+ ///
+ /// unsafe {
+ /// // Overwrite memory with 4, 5, 6
+ /// for i in 0..len as isize {
+ /// ptr::write(p.offset(i), 4 + i);
+ /// }
+ ///
+ /// // Put everything back together into a Vec
+ /// let rebuilt = Vec::from_raw_parts_in(p, len, cap, alloc.clone());
+ /// assert_eq!(rebuilt, [4, 5, 6]);
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
+ unsafe { Vec { buf: RawVec::from_raw_parts_in(ptr, capacity, alloc), len: length } }
+ }
+
+ /// Decomposes a `Vec<T>` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of
+ /// the vector (in elements), and the allocated capacity of the
+ /// data (in elements). These are the same arguments in the same
+ /// order as the arguments to [`from_raw_parts`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Vec`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `Vec` with the [`from_raw_parts`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts`]: Vec::from_raw_parts
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_into_raw_parts)]
+ /// let v: Vec<i32> = vec![-1, 0, 1];
+ ///
+ /// let (ptr, len, cap) = v.into_raw_parts();
+ ///
+ /// let rebuilt = unsafe {
+ /// // We can now make changes to the components, such as
+ /// // transmuting the raw pointer to a compatible type.
+ /// let ptr = ptr as *mut u32;
+ ///
+ /// Vec::from_raw_parts(ptr, len, cap)
+ /// };
+ /// assert_eq!(rebuilt, [4294967295, 0, 1]);
+ /// ```
+ #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
+ let mut me = ManuallyDrop::new(self);
+ (me.as_mut_ptr(), me.len(), me.capacity())
+ }
+
+ /// Decomposes a `Vec<T>` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of the vector (in elements),
+ /// the allocated capacity of the data (in elements), and the allocator. These are the same
+ /// arguments in the same order as the arguments to [`from_raw_parts_in`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Vec`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `Vec` with the [`from_raw_parts_in`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts_in`]: Vec::from_raw_parts_in
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, vec_into_raw_parts)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut v: Vec<i32, System> = Vec::new_in(System);
+ /// v.push(-1);
+ /// v.push(0);
+ /// v.push(1);
+ ///
+ /// let (ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
+ ///
+ /// let rebuilt = unsafe {
+ /// // We can now make changes to the components, such as
+ /// // transmuting the raw pointer to a compatible type.
+ /// let ptr = ptr as *mut u32;
+ ///
+ /// Vec::from_raw_parts_in(ptr, len, cap, alloc)
+ /// };
+ /// assert_eq!(rebuilt, [4294967295, 0, 1]);
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts_with_alloc(self) -> (*mut T, usize, usize, A) {
+ let mut me = ManuallyDrop::new(self);
+ let len = me.len();
+ let capacity = me.capacity();
+ let ptr = me.as_mut_ptr();
+ let alloc = unsafe { ptr::read(me.allocator()) };
+ (ptr, len, capacity, alloc)
+ }
+
+ /// Returns the number of elements the vector can hold without
+ /// reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec: Vec<i32> = Vec::with_capacity(10);
+ /// assert_eq!(vec.capacity(), 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.buf.capacity()
+ }
+
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the given `Vec<T>`. The collection may reserve more space to avoid
+ /// frequent reallocations. After calling `reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional`. Does nothing if
+ /// capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.reserve(10);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.buf.reserve(self.len, additional);
+ }
+
+ /// Reserves the minimum capacity for exactly `additional` more elements to
+ /// be inserted in the given `Vec<T>`. After calling `reserve_exact`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`reserve`] if future insertions are expected.
+ ///
+ /// [`reserve`]: Vec::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.reserve_exact(10);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.buf.reserve_exact(self.len, additional);
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given `Vec<T>`. The collection may reserve more space to avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional`. Does nothing if
+ /// capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
+ /// let mut output = Vec::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.try_reserve(self.len, additional)
+ }
+
+ /// Tries to reserve the minimum capacity for exactly `additional`
+ /// elements to be inserted in the given `Vec<T>`. After calling
+ /// `try_reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: Vec::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
+ /// let mut output = Vec::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.try_reserve_exact(self.len, additional)
+ }
+
+ /// Shrinks the capacity of the vector as much as possible.
+ ///
+ /// It will drop down as close as possible to the length but the allocator
+ /// may still inform the vector that there is space for a few more elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.shrink_to_fit();
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ // The capacity is never less than the length, and there's nothing to do when
+ // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
+ // by only calling it with a greater capacity.
+ if self.capacity() > self.len {
+ self.buf.shrink_to_fit(self.len);
+ }
+ }
+
+ /// Tries to shrink the capacity of the vector as much as possible.
+ ///
+ /// It will drop down as close as possible to the length but the allocator
+ /// may still inform the vector that there is space for a few more elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.try_shrink_to_fit().unwrap();
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_shrink_to_fit(&mut self) -> Result<(), TryReserveError> {
+ // The capacity is never less than the length, and there's nothing to do when
+ // they are equal, so we can avoid the panic case in `RawVec::try_shrink_to_fit`
+ // by only calling it with a greater capacity.
+ if self.capacity() <= self.len {
+ return Ok(());
+ }
+
+ self.buf.try_shrink_to_fit(self.len)
+ }
+
+ /// Shrinks the capacity of the vector with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.shrink_to(4);
+ /// assert!(vec.capacity() >= 4);
+ /// vec.shrink_to(0);
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ if self.capacity() > min_capacity {
+ self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
+ }
+ }
+
+ /// Converts the vector into [`Box<[T]>`][owned slice].
+ ///
+ /// Note that this will drop any excess capacity.
+ ///
+ /// [owned slice]: Box
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec![1, 2, 3];
+ ///
+ /// let slice = v.into_boxed_slice();
+ /// ```
+ ///
+ /// Any excess capacity is removed:
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(vec.capacity(), 10);
+ /// let slice = vec.into_boxed_slice();
+ /// assert_eq!(slice.into_vec().capacity(), 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_boxed_slice(mut self) -> Box<[T], A> {
+ unsafe {
+ self.shrink_to_fit();
+ let me = ManuallyDrop::new(self);
+ let buf = ptr::read(&me.buf);
+ let len = me.len();
+ buf.into_box(len).assume_init()
+ }
+ }
+
+ /// Tries to convert the vector into [`Box<[T]>`][owned slice].
+ ///
+ /// Note that this will drop any excess capacity.
+ ///
+ /// [owned slice]: Box
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec![1, 2, 3];
+ ///
+ /// let slice = v.try_into_boxed_slice().unwrap();
+ /// ```
+ ///
+ /// Any excess capacity is removed:
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(vec.capacity(), 10);
+ /// let slice = vec.try_into_boxed_slice().unwrap();
+ /// assert_eq!(slice.into_vec().capacity(), 3);
+ /// ```
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_into_boxed_slice(mut self) -> Result<Box<[T], A>, TryReserveError> {
+ unsafe {
+ self.try_shrink_to_fit()?;
+ let me = ManuallyDrop::new(self);
+ let buf = ptr::read(&me.buf);
+ let len = me.len();
+ Ok(buf.into_box(len).assume_init())
+ }
+ }
+
+ /// Shortens the vector, keeping the first `len` elements and dropping
+ /// the rest.
+ ///
+ /// If `len` is greater than the vector's current length, this has no
+ /// effect.
+ ///
+ /// The [`drain`] method can emulate `truncate`, but causes the excess
+ /// elements to be returned instead of dropped.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the vector.
+ ///
+ /// # Examples
+ ///
+ /// Truncating a five element vector to two elements:
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4, 5];
+ /// vec.truncate(2);
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ ///
+ /// No truncation occurs when `len` is greater than the vector's current
+ /// length:
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.truncate(8);
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ ///
+ /// Truncating when `len == 0` is equivalent to calling the [`clear`]
+ /// method.
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.truncate(0);
+ /// assert_eq!(vec, []);
+ /// ```
+ ///
+ /// [`clear`]: Vec::clear
+ /// [`drain`]: Vec::drain
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn truncate(&mut self, len: usize) {
+ // This is safe because:
+ //
+ // * the slice passed to `drop_in_place` is valid; the `len > self.len`
+ // case avoids creating an invalid slice, and
+ // * the `len` of the vector is shrunk before calling `drop_in_place`,
+ // such that no value will be dropped twice in case `drop_in_place`
+ // were to panic once (if it panics twice, the program aborts).
+ unsafe {
+ // Note: It's intentional that this is `>` and not `>=`.
+ // Changing it to `>=` has negative performance
+ // implications in some cases. See #78884 for more.
+ if len > self.len {
+ return;
+ }
+ let remaining_len = self.len - len;
+ let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
+ self.len = len;
+ ptr::drop_in_place(s);
+ }
+ }
+
+ /// Extracts a slice containing the entire vector.
+ ///
+ /// Equivalent to `&s[..]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Write};
+ /// let buffer = vec![1, 2, 3, 5, 8];
+ /// io::sink().write(buffer.as_slice()).unwrap();
+ /// ```
+ #[inline]
+ #[stable(feature = "vec_as_slice", since = "1.7.0")]
+ pub fn as_slice(&self) -> &[T] {
+ self
+ }
+
+ /// Extracts a mutable slice of the entire vector.
+ ///
+ /// Equivalent to `&mut s[..]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Read};
+ /// let mut buffer = vec![0; 3];
+ /// io::repeat(0b101).read_exact(buffer.as_mut_slice()).unwrap();
+ /// ```
+ #[inline]
+ #[stable(feature = "vec_as_slice", since = "1.7.0")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ self
+ }
+
+ /// Returns a raw pointer to the vector's buffer.
+ ///
+ /// The caller must ensure that the vector outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ /// Modifying the vector may cause its buffer to be reallocated,
+ /// which would also make any pointers to it invalid.
+ ///
+ /// The caller must also ensure that the memory the pointer (non-transitively) points to
+ /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
+ /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = vec![1, 2, 4];
+ /// let x_ptr = x.as_ptr();
+ ///
+ /// unsafe {
+ /// for i in 0..x.len() {
+ /// assert_eq!(*x_ptr.add(i), 1 << i);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`as_mut_ptr`]: Vec::as_mut_ptr
+ #[stable(feature = "vec_as_ptr", since = "1.37.0")]
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref`, which creates an intermediate reference.
+ let ptr = self.buf.ptr();
+ unsafe {
+ assume(!ptr.is_null());
+ }
+ ptr
+ }
+
+ /// Returns an unsafe mutable pointer to the vector's buffer.
+ ///
+ /// The caller must ensure that the vector outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ /// Modifying the vector may cause its buffer to be reallocated,
+ /// which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Allocate vector big enough for 4 elements.
+ /// let size = 4;
+ /// let mut x: Vec<i32> = Vec::with_capacity(size);
+ /// let x_ptr = x.as_mut_ptr();
+ ///
+ /// // Initialize elements via raw pointer writes, then set length.
+ /// unsafe {
+ /// for i in 0..size {
+ /// *x_ptr.add(i) = i as i32;
+ /// }
+ /// x.set_len(size);
+ /// }
+ /// assert_eq!(&*x, &[0, 1, 2, 3]);
+ /// ```
+ #[stable(feature = "vec_as_ptr", since = "1.37.0")]
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref_mut`, which creates an intermediate reference.
+ let ptr = self.buf.ptr();
+ unsafe {
+ assume(!ptr.is_null());
+ }
+ ptr
+ }
+
+ /// Returns a reference to the underlying allocator.
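+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (assuming the unstable `allocator_api` feature):
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let v: Vec<i32, System> = Vec::new_in(System);
+ /// let _alloc: &System = v.allocator();
+ /// ```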
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.buf.allocator()
+ }
+
+ /// Forces the length of the vector to `new_len`.
+ ///
+ /// This is a low-level operation that maintains none of the normal
+ /// invariants of the type. Normally changing the length of a vector
+ /// is done using one of the safe operations instead, such as
+ /// [`truncate`], [`resize`], [`extend`], or [`clear`].
+ ///
+ /// [`truncate`]: Vec::truncate
+ /// [`resize`]: Vec::resize
+ /// [`extend`]: Extend::extend
+ /// [`clear`]: Vec::clear
+ ///
+ /// # Safety
+ ///
+ /// - `new_len` must be less than or equal to [`capacity()`].
+ /// - The elements at `old_len..new_len` must be initialized.
+ ///
+ /// [`capacity()`]: Vec::capacity
+ ///
+ /// # Examples
+ ///
+ /// This method can be useful for situations in which the vector
+ /// is serving as a buffer for other code, particularly over FFI:
+ ///
+ /// ```no_run
+ /// # #![allow(dead_code)]
+ /// # // This is just a minimal skeleton for the doc example;
+ /// # // don't use this as a starting point for a real library.
+ /// # pub struct StreamWrapper { strm: *mut std::ffi::c_void }
+ /// # const Z_OK: i32 = 0;
+ /// # extern "C" {
+ /// # fn deflateGetDictionary(
+ /// # strm: *mut std::ffi::c_void,
+ /// # dictionary: *mut u8,
+ /// # dictLength: *mut usize,
+ /// # ) -> i32;
+ /// # }
+ /// # impl StreamWrapper {
+ /// pub fn get_dictionary(&self) -> Option<Vec<u8>> {
+ /// // Per the FFI method's docs, "32768 bytes is always enough".
+ /// let mut dict = Vec::with_capacity(32_768);
+ /// let mut dict_length = 0;
+ /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that:
+ /// // 1. `dict_length` elements were initialized.
+ /// // 2. `dict_length` <= the capacity (32_768)
+ /// // which makes `set_len` safe to call.
+ /// unsafe {
+ /// // Make the FFI call...
+ /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length);
+ /// if r == Z_OK {
+ /// // ...and update the length to what was initialized.
+ /// dict.set_len(dict_length);
+ /// Some(dict)
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// While the following example is sound, there is a memory leak since
+ /// the inner vectors were not freed prior to the `set_len` call:
+ ///
+ /// ```
+ /// let mut vec = vec![vec![1, 0, 0],
+ /// vec![0, 1, 0],
+ /// vec![0, 0, 1]];
+ /// // SAFETY:
+ /// // 1. `old_len..0` is empty so no elements need to be initialized.
+ /// // 2. `0 <= capacity` always holds whatever `capacity` is.
+ /// unsafe {
+ /// vec.set_len(0);
+ /// }
+ /// ```
+ ///
+ /// Normally, here, one would use [`clear`] instead to correctly drop
+ /// the contents and thus not leak memory.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn set_len(&mut self, new_len: usize) {
+ debug_assert!(new_len <= self.capacity());
+
+ self.len = new_len;
+ }
+
+ /// Removes an element from the vector and returns it.
+ ///
+ /// The removed element is replaced by the last element of the vector.
+ ///
+ /// This does not preserve ordering, but is *O*(1).
+ /// If you need to preserve the element order, use [`remove`] instead.
+ ///
+ /// [`remove`]: Vec::remove
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec!["foo", "bar", "baz", "qux"];
+ ///
+ /// assert_eq!(v.swap_remove(1), "bar");
+ /// assert_eq!(v, ["foo", "qux", "baz"]);
+ ///
+ /// assert_eq!(v.swap_remove(0), "foo");
+ /// assert_eq!(v, ["baz", "qux"]);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn swap_remove(&mut self, index: usize) -> T {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("swap_remove index (is {index}) should be < len (is {len})");
+ }
+
+ let len = self.len();
+ if index >= len {
+ assert_failed(index, len);
+ }
+ unsafe {
+ // We replace self[index] with the last element. Note that if the
+ // bounds check above succeeds there must be a last element (which
+ // can be self[index] itself).
+ let value = ptr::read(self.as_ptr().add(index));
+ let base_ptr = self.as_mut_ptr();
+ ptr::copy(base_ptr.add(len - 1), base_ptr.add(index), 1);
+ self.set_len(len - 1);
+ value
+ }
+ }
+
+ /// Inserts an element at position `index` within the vector, shifting all
+ /// elements after it to the right.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.insert(1, 4);
+ /// assert_eq!(vec, [1, 4, 2, 3]);
+ /// vec.insert(4, 5);
+ /// assert_eq!(vec, [1, 4, 2, 3, 5]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, index: usize, element: T) {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("insertion index (is {index}) should be <= len (is {len})");
+ }
+
+ let len = self.len();
+ if index > len {
+ assert_failed(index, len);
+ }
+
+ // space for the new element
+ if len == self.buf.capacity() {
+ self.reserve(1);
+ }
+
+ unsafe {
+ // infallible
+ // The spot to put the new value
+ {
+ let p = self.as_mut_ptr().add(index);
+ // Shift everything over to make space. (Duplicating the
+ // `index`th element into two consecutive places.)
+ ptr::copy(p, p.offset(1), len - index);
+ // Write it in, overwriting the first copy of the `index`th
+ // element.
+ ptr::write(p, element);
+ }
+ self.set_len(len + 1);
+ }
+ }
+
+ /// Removes and returns the element at position `index` within the vector,
+ /// shifting all elements after it to the left.
+ ///
+ /// Note: Because this shifts over the remaining elements, it has a
+ /// worst-case performance of *O*(*n*). If you don't need the order of elements
+ /// to be preserved, use [`swap_remove`] instead. If you'd like to remove
+ /// elements from the beginning of the `Vec`, consider using
+ /// [`VecDeque::pop_front`] instead.
+ ///
+ /// [`swap_remove`]: Vec::swap_remove
+ /// [`VecDeque::pop_front`]: crate::collections::VecDeque::pop_front
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// assert_eq!(v.remove(1), 2);
+ /// assert_eq!(v, [1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[track_caller]
+ pub fn remove(&mut self, index: usize) -> T {
+ #[cold]
+ #[inline(never)]
+ #[track_caller]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("removal index (is {index}) should be < len (is {len})");
+ }
+
+ let len = self.len();
+ if index >= len {
+ assert_failed(index, len);
+ }
+ unsafe {
+ // infallible
+ let ret;
+ {
+ // the place we are taking from.
+ let ptr = self.as_mut_ptr().add(index);
+ // copy it out, unsafely having a copy of the value on
+ // the stack and in the vector at the same time.
+ ret = ptr::read(ptr);
+
+ // Shift everything down to fill in that spot.
+ ptr::copy(ptr.offset(1), ptr, len - index - 1);
+ }
+ self.set_len(len - 1);
+ ret
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.retain(|&x| x % 2 == 0);
+ /// assert_eq!(vec, [2, 4]);
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4, 5];
+ /// let keep = [false, true, true, false, true];
+ /// let mut iter = keep.iter();
+ /// vec.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(vec, [2, 3, 5]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.retain_mut(|elem| f(elem));
+ }
+
+ /// Retains only the elements specified by the predicate, passing a mutable reference to it.
+ ///
+ /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.retain_mut(|x| if *x > 3 {
+ /// false
+ /// } else {
+ /// *x += 1;
+ /// true
+ /// });
+ /// assert_eq!(vec, [2, 3, 4]);
+ /// ```
+ #[stable(feature = "vec_retain_mut", since = "1.61.0")]
+ pub fn retain_mut<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let original_len = self.len();
+ // Avoid double drop if the drop guard is not executed,
+ // since we may make some holes during the process.
+ unsafe { self.set_len(0) };
+
+ // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
+ // |<- processed len ->| ^- next to check
+ // |<- deleted cnt ->|
+ // |<- original_len ->|
+ // Kept: Elements which predicate returns true on.
+ // Hole: Moved or dropped element slot.
+ // Unchecked: Unchecked valid elements.
+ //
+ // This drop guard will be invoked if the predicate or an element's `drop` panics.
+ // It shifts the unchecked elements to cover the holes and calls `set_len` with the
+ // correct length. In cases where the predicate and `drop` never panic, it is
+ // optimized out.
+ struct BackshiftOnDrop<'a, T, A: Allocator> {
+ v: &'a mut Vec<T, A>,
+ processed_len: usize,
+ deleted_cnt: usize,
+ original_len: usize,
+ }
+
+ impl<T, A: Allocator> Drop for BackshiftOnDrop<'_, T, A> {
+ fn drop(&mut self) {
+ if self.deleted_cnt > 0 {
+ // SAFETY: Trailing unchecked items must be valid since we never touch them.
+ unsafe {
+ ptr::copy(
+ self.v.as_ptr().add(self.processed_len),
+ self.v.as_mut_ptr().add(self.processed_len - self.deleted_cnt),
+ self.original_len - self.processed_len,
+ );
+ }
+ }
+ // SAFETY: After filling holes, all items are in contiguous memory.
+ unsafe {
+ self.v.set_len(self.original_len - self.deleted_cnt);
+ }
+ }
+ }
+
+ let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len };
+
+ fn process_loop<F, T, A: Allocator, const DELETED: bool>(
+ original_len: usize,
+ f: &mut F,
+ g: &mut BackshiftOnDrop<'_, T, A>,
+ ) where
+ F: FnMut(&mut T) -> bool,
+ {
+ while g.processed_len != original_len {
+ // SAFETY: Unchecked element must be valid.
+ let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
+ if !f(cur) {
+ // Advance early to avoid double drop if `drop_in_place` panicked.
+ g.processed_len += 1;
+ g.deleted_cnt += 1;
+ // SAFETY: We never touch this element again after it is dropped.
+ unsafe { ptr::drop_in_place(cur) };
+ // We already advanced the counter.
+ if DELETED {
+ continue;
+ } else {
+ break;
+ }
+ }
+ if DELETED {
+ // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element.
+ // We use copy for move, and never touch this element again.
+ unsafe {
+ let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
+ ptr::copy_nonoverlapping(cur, hole_slot, 1);
+ }
+ }
+ g.processed_len += 1;
+ }
+ }
+
+ // Stage 1: Nothing was deleted.
+ process_loop::<F, T, A, false>(original_len, &mut f, &mut g);
+
+ // Stage 2: Some elements were deleted.
+ process_loop::<F, T, A, true>(original_len, &mut f, &mut g);
+
+ // All items are processed. This can be optimized to `set_len` by LLVM.
+ drop(g);
+ }
+
+ /// Removes all but the first of consecutive elements in the vector that resolve to the same
+ /// key.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![10, 20, 21, 30, 20];
+ ///
+ /// vec.dedup_by_key(|i| *i / 10);
+ ///
+ /// assert_eq!(vec, [10, 20, 30, 20]);
+ /// ```
+ #[stable(feature = "dedup_by", since = "1.16.0")]
+ #[inline]
+ pub fn dedup_by_key<F, K>(&mut self, mut key: F)
+ where
+ F: FnMut(&mut T) -> K,
+ K: PartialEq,
+ {
+ self.dedup_by(|a, b| key(a) == key(b))
+ }
+
+ /// Removes all but the first of consecutive elements in the vector satisfying a given equality
+ /// relation.
+ ///
+ /// The `same_bucket` function is passed references to two elements from the vector and
+ /// must determine if the elements compare equal. The elements are passed in opposite order
+ /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is removed.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"];
+ ///
+ /// vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+ ///
+ /// assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
+ /// ```
+ #[stable(feature = "dedup_by", since = "1.16.0")]
+ pub fn dedup_by<F>(&mut self, mut same_bucket: F)
+ where
+ F: FnMut(&mut T, &mut T) -> bool,
+ {
+ let len = self.len();
+ if len <= 1 {
+ return;
+ }
+
+ /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+ struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
+ /* Offset of the element we want to check if it is duplicate */
+ read: usize,
+
+ /* Offset of the place where we want to place the non-duplicate
+ * when we find it. */
+ write: usize,
+
+ /* The Vec that would need correction if `same_bucket` panicked */
+ vec: &'a mut Vec<T, A>,
+ }
+
+ impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
+ fn drop(&mut self) {
+ /* This code gets executed when `same_bucket` panics */
+
+ /* SAFETY: invariant guarantees that `read - write`
+ * and `len - read` never overflow and that the copy is always
+ * in-bounds. */
+ unsafe {
+ let ptr = self.vec.as_mut_ptr();
+ let len = self.vec.len();
+
+ /* How many items were left when `same_bucket` panicked.
+ * Basically vec[read..].len() */
+ let items_left = len.wrapping_sub(self.read);
+
+ /* Pointer to first item in vec[write..write+items_left] slice */
+ let dropped_ptr = ptr.add(self.write);
+ /* Pointer to first item in vec[read..] slice */
+ let valid_ptr = ptr.add(self.read);
+
+ /* Copy `vec[read..]` to `vec[write..write+items_left]`.
+ * The slices can overlap, so `copy_nonoverlapping` cannot be used */
+ ptr::copy(valid_ptr, dropped_ptr, items_left);
+
+ /* How many items have already been dropped.
+ * Basically vec[read..write].len() */
+ let dropped = self.read.wrapping_sub(self.write);
+
+ self.vec.set_len(len - dropped);
+ }
+ }
+ }
+
+ let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
+ let ptr = gap.vec.as_mut_ptr();
+
+ /* Drop items while going through the Vec; this should be more efficient
+ * than doing slice partition_dedup + truncate */
+
+ /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
+ * are always in-bounds and read_ptr never aliases prev_ptr */
+ unsafe {
+ while gap.read < len {
+ let read_ptr = ptr.add(gap.read);
+ let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+
+ if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+ // Increase `gap.read` now since the drop may panic.
+ gap.read += 1;
+ /* We have found duplicate, drop it in-place */
+ ptr::drop_in_place(read_ptr);
+ } else {
+ let write_ptr = ptr.add(gap.write);
+
+ /* Because `read_ptr` can be equal to `write_ptr`, we either
+ * have to use `copy` or conditional `copy_nonoverlapping`.
+ * Looks like the first option is faster. */
+ ptr::copy(read_ptr, write_ptr, 1);
+
+ /* We have filled that place, so go further */
+ gap.write += 1;
+ gap.read += 1;
+ }
+ }
+
+ /* Technically we could let `gap` clean up with its Drop, but
+ * when `same_bucket` is guaranteed not to panic, this bloats the
+ * codegen a little, so we just do it manually */
+ gap.vec.set_len(gap.write);
+ mem::forget(gap);
+ }
+ }
+
+ /// Appends an element to the back of a collection.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2];
+ /// vec.push(3);
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, value: T) {
+ // This will panic or abort if we would allocate > isize::MAX bytes
+ // or if the length increment would overflow for zero-sized types.
+ if self.len == self.buf.capacity() {
+ self.buf.reserve_for_push(self.len);
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ }
+
+ /// Tries to append an element to the back of a collection.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2];
+ /// vec.try_push(3).unwrap();
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ #[inline]
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_push(&mut self, value: T) -> Result<(), TryReserveError> {
+ if self.len == self.buf.capacity() {
+ self.buf.try_reserve_for_push(self.len)?;
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ Ok(())
+ }
+
+ /// Removes the last element from a vector and returns it, or [`None`] if it
+ /// is empty.
+ ///
+ /// If you'd like to pop the first element, consider using
+ /// [`VecDeque::pop_front`] instead.
+ ///
+ /// [`VecDeque::pop_front`]: crate::collections::VecDeque::pop_front
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// assert_eq!(vec.pop(), Some(3));
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<T> {
+ if self.len == 0 {
+ None
+ } else {
+ unsafe {
+ self.len -= 1;
+ Some(ptr::read(self.as_ptr().add(self.len())))
+ }
+ }
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the number of elements in the vector overflows a `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// let mut vec2 = vec![4, 5, 6];
+ /// vec.append(&mut vec2);
+ /// assert_eq!(vec, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(vec2, []);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "append", since = "1.4.0")]
+ pub fn append(&mut self, other: &mut Self) {
+ unsafe {
+ self.append_elements(other.as_slice() as _);
+ other.set_len(0);
+ }
+ }
+
+ /// Appends elements to `self` from another buffer.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ unsafe fn append_elements(&mut self, other: *const [T]) {
+ let count = unsafe { (*other).len() };
+ self.reserve(count);
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ }
+
+ /// Tries to append elements to `self` from another buffer.
+ #[inline]
+ unsafe fn try_append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> {
+ let count = unsafe { (*other).len() };
+ self.try_reserve(count)?;
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ Ok(())
+ }
+
+ /// Removes the specified range from the vector in bulk, returning all
+ /// removed elements as an iterator. If the iterator is dropped before
+ /// being fully consumed, it drops the remaining removed elements.
+ ///
+ /// The returned iterator keeps a mutable borrow on the vector to optimize
+ /// its implementation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Leaking
+ ///
+ /// If the returned iterator goes out of scope without being dropped (due to
+ /// [`mem::forget`], for example), the vector may have lost and leaked
+ /// elements arbitrarily, including elements outside the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// let u: Vec<_> = v.drain(1..).collect();
+ /// assert_eq!(v, &[1]);
+ /// assert_eq!(u, &[2, 3]);
+ ///
+ /// // A full range clears the vector, like `clear()` does
+ /// v.drain(..);
+ /// assert_eq!(v, &[]);
+ /// ```
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // When the Drain is first created, it shortens the length of
+ // the source vector to make sure no uninitialized or moved-from elements
+ // are accessible at all if the Drain's destructor never gets to run.
+ //
+ // Drain will ptr::read out the values to remove.
+ // When finished, remaining tail of the vec is copied back to cover
+ // the hole, and the vector length is restored to the new length.
+ //
+ let len = self.len();
+ let Range { start, end } = slice::range(range, ..len);
+
+ unsafe {
+ // Set `self`'s length to `start`, to be safe in case the `Drain` is leaked.
+ self.set_len(start);
+ // Use the borrow in the IterMut to indicate borrowing behavior of the
+ // whole Drain iterator (like &mut T).
+ let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start);
+ Drain {
+ tail_start: end,
+ tail_len: len - end,
+ iter: range_slice.iter(),
+ vec: NonNull::from(self),
+ }
+ }
+ }
+
+ /// Clears the vector, removing all values.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ ///
+ /// v.clear();
+ ///
+ /// assert!(v.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ let elems: *mut [T] = self.as_mut_slice();
+
+ // SAFETY:
+ // - `elems` comes directly from `as_mut_slice` and is therefore valid.
+ // - Setting `self.len` before calling `drop_in_place` means that,
+ // if an element's `Drop` impl panics, the vector's `Drop` impl will
+ // do nothing (leaking the rest of the elements) instead of dropping
+ // some twice.
+ unsafe {
+ self.len = 0;
+ ptr::drop_in_place(elems);
+ }
+ }
+
+ /// Returns the number of elements in the vector, also referred to
+ /// as its 'length'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = vec![1, 2, 3];
+ /// assert_eq!(a.len(), 3);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns `true` if the vector contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = Vec::new();
+ /// assert!(v.is_empty());
+ ///
+ /// v.push(1);
+ /// assert!(!v.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated vector containing the elements in the range
+ /// `[at, len)`. After the call, the original vector will be left containing
+ /// the elements `[0, at)` with its previous capacity unchanged.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// let vec2 = vec.split_off(1);
+ /// assert_eq!(vec, [1]);
+ /// assert_eq!(vec2, [2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ #[stable(feature = "split_off", since = "1.4.0")]
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ A: Clone,
+ {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(at: usize, len: usize) -> ! {
+ panic!("`at` split index (is {at}) should be <= len (is {len})");
+ }
+
+ if at > self.len() {
+ assert_failed(at, self.len());
+ }
+
+ if at == 0 {
+ // the new vector can take over the original buffer and avoid the copy
+ return mem::replace(
+ self,
+ Vec::with_capacity_in(self.capacity(), self.allocator().clone()),
+ );
+ }
+
+ let other_len = self.len - at;
+ let mut other = Vec::with_capacity_in(other_len, self.allocator().clone());
+
+ // Unsafely `set_len` and copy items to `other`.
+ unsafe {
+ self.set_len(at);
+ other.set_len(other_len);
+
+ ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
+ }
+ other
+ }
+
+ /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with the result of
+ /// calling the closure `f`. The return values from `f` will end up
+ /// in the `Vec` in the order they have been generated.
+ ///
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method uses a closure to create new values on every push. If
+ /// you'd rather [`Clone`] a given value, use [`Vec::resize`]. If you
+ /// want to use the [`Default`] trait to generate values, you can
+ /// pass [`Default::default`] as the second argument.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.resize_with(5, Default::default);
+ /// assert_eq!(vec, [1, 2, 3, 0, 0]);
+ ///
+ /// let mut vec = vec![];
+ /// let mut p = 1;
+ /// vec.resize_with(4, || { p *= 2; p });
+ /// assert_eq!(vec, [2, 4, 8, 16]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_resize_with", since = "1.33.0")]
+ pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+ where
+ F: FnMut() -> T,
+ {
+ let len = self.len();
+ if new_len > len {
+ self.extend_with(new_len - len, ExtendFunc(f));
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
+ /// `&'a mut [T]`. Note that the type `T` must outlive the chosen lifetime
+ /// `'a`. If the type has only static references, or none at all, then this
+ /// may be chosen to be `'static`.
+ ///
+ /// As of Rust 1.57, this method does not reallocate or shrink the `Vec`,
+ /// so the leaked allocation may include unused capacity that is not part
+ /// of the returned slice.
+ ///
+ /// This function is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// let x = vec![1, 2, 3];
+ /// let static_ref: &'static mut [usize] = x.leak();
+ /// static_ref[0] += 1;
+ /// assert_eq!(static_ref, &[2, 2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_leak", since = "1.47.0")]
+ #[inline]
+ pub fn leak<'a>(self) -> &'a mut [T]
+ where
+ A: 'a,
+ {
+ let mut me = ManuallyDrop::new(self);
+ unsafe { slice::from_raw_parts_mut(me.as_mut_ptr(), me.len) }
+ }
+
+ /// Returns the remaining spare capacity of the vector as a slice of
+ /// `MaybeUninit<T>`.
+ ///
+ /// The returned slice can be used to fill the vector with data (e.g. by
+ /// reading from a file) before marking the data as initialized using the
+ /// [`set_len`] method.
+ ///
+ /// [`set_len`]: Vec::set_len
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Allocate vector big enough for 10 elements.
+ /// let mut v = Vec::with_capacity(10);
+ ///
+ /// // Fill in the first 3 elements.
+ /// let uninit = v.spare_capacity_mut();
+ /// uninit[0].write(0);
+ /// uninit[1].write(1);
+ /// uninit[2].write(2);
+ ///
+ /// // Mark the first 3 elements of the vector as being initialized.
+ /// unsafe {
+ /// v.set_len(3);
+ /// }
+ ///
+ /// assert_eq!(&v, &[0, 1, 2]);
+ /// ```
+ #[stable(feature = "vec_spare_capacity", since = "1.60.0")]
+ #[inline]
+ pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
+ // Note:
+ // This method is not implemented in terms of `split_at_spare_mut`,
+ // to prevent invalidation of pointers to the buffer.
+ unsafe {
+ slice::from_raw_parts_mut(
+ self.as_mut_ptr().add(self.len) as *mut MaybeUninit<T>,
+ self.buf.capacity() - self.len,
+ )
+ }
+ }
+
+ /// Returns vector content as a slice of `T`, along with the remaining spare
+ /// capacity of the vector as a slice of `MaybeUninit<T>`.
+ ///
+ /// The returned spare capacity slice can be used to fill the vector with data
+ /// (e.g. by reading from a file) before marking the data as initialized using
+ /// the [`set_len`] method.
+ ///
+ /// [`set_len`]: Vec::set_len
+ ///
+ /// Note that this is a low-level API, which should be used with care for
+ /// optimization purposes. If you need to append data to a `Vec`
+ /// you can use [`push`], [`extend`], [`extend_from_slice`],
+ /// [`extend_from_within`], [`insert`], [`append`], [`resize`] or
+ /// [`resize_with`], depending on your exact needs.
+ ///
+ /// [`push`]: Vec::push
+ /// [`extend`]: Vec::extend
+ /// [`extend_from_slice`]: Vec::extend_from_slice
+ /// [`extend_from_within`]: Vec::extend_from_within
+ /// [`insert`]: Vec::insert
+ /// [`append`]: Vec::append
+ /// [`resize`]: Vec::resize
+ /// [`resize_with`]: Vec::resize_with
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_split_at_spare)]
+ ///
+ /// let mut v = vec![1, 1, 2];
+ ///
+ /// // Reserve additional space big enough for 10 elements.
+ /// v.reserve(10);
+ ///
+ /// let (init, uninit) = v.split_at_spare_mut();
+ /// let sum = init.iter().copied().sum::<u32>();
+ ///
+ /// // Fill in the next 4 elements.
+ /// uninit[0].write(sum);
+ /// uninit[1].write(sum * 2);
+ /// uninit[2].write(sum * 3);
+ /// uninit[3].write(sum * 4);
+ ///
+ /// // Mark the 4 elements of the vector as being initialized.
+ /// unsafe {
+ /// let len = v.len();
+ /// v.set_len(len + 4);
+ /// }
+ ///
+ /// assert_eq!(&v, &[1, 1, 2, 4, 8, 12, 16]);
+ /// ```
+ #[unstable(feature = "vec_split_at_spare", issue = "81944")]
+ #[inline]
+ pub fn split_at_spare_mut(&mut self) -> (&mut [T], &mut [MaybeUninit<T>]) {
+ // SAFETY:
+ // - len is ignored and so never changed
+ let (init, spare, _) = unsafe { self.split_at_spare_mut_with_len() };
+ (init, spare)
+ }
+
+ /// Safety: changing returned .2 (&mut usize) is considered the same as calling `.set_len(_)`.
+ ///
+ /// This method provides unique access to all vec parts at once in `extend_from_within`.
+ unsafe fn split_at_spare_mut_with_len(
+ &mut self,
+ ) -> (&mut [T], &mut [MaybeUninit<T>], &mut usize) {
+ let ptr = self.as_mut_ptr();
+ // SAFETY:
+ // - `ptr` is guaranteed to be valid for `self.len` elements
+ // - but the allocation extends out to `self.buf.capacity()` elements, possibly
+ // uninitialized
+ let spare_ptr = unsafe { ptr.add(self.len) };
+ let spare_ptr = spare_ptr.cast::<MaybeUninit<T>>();
+ let spare_len = self.buf.capacity() - self.len;
+
+ // SAFETY:
+ // - `ptr` is guaranteed to be valid for `self.len` elements
+ // - `spare_ptr` points one element past the initialized elements, so it doesn't overlap with `initialized`
+ unsafe {
+ let initialized = slice::from_raw_parts_mut(ptr, self.len);
+ let spare = slice::from_raw_parts_mut(spare_ptr, spare_len);
+
+ (initialized, spare, &mut self.len)
+ }
+ }
+}
+
+impl<T: Clone, A: Allocator> Vec<T, A> {
+ /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with `value`.
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method requires `T` to implement [`Clone`],
+ /// in order to be able to clone the passed value.
+ /// If you need more flexibility (or want to rely on [`Default`] instead of
+ /// [`Clone`]), use [`Vec::resize_with`].
+ /// If you only need to resize to a smaller size, use [`Vec::truncate`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["hello"];
+ /// vec.resize(3, "world");
+ /// assert_eq!(vec, ["hello", "world", "world"]);
+ ///
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.resize(2, 0);
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_resize", since = "1.5.0")]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ let len = self.len();
+
+ if new_len > len {
+ self.extend_with(new_len - len, ExtendElement(value))
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Tries to resize the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with `value`.
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method requires `T` to implement [`Clone`],
+ /// in order to be able to clone the passed value.
+ /// If you need more flexibility (or want to rely on [`Default`] instead of
+ /// [`Clone`]), use [`Vec::resize_with`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["hello"];
+ /// vec.try_resize(3, "world").unwrap();
+ /// assert_eq!(vec, ["hello", "world", "world"]);
+ ///
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.try_resize(2, 0).unwrap();
+ /// assert_eq!(vec, [1, 2]);
+ ///
+ /// let mut vec = vec![42];
+ /// let result = vec.try_resize(usize::MAX, 0);
+ /// assert!(result.is_err());
+ /// ```
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_resize(&mut self, new_len: usize, value: T) -> Result<(), TryReserveError> {
+ let len = self.len();
+
+ if new_len > len {
+ self.try_extend_with(new_len - len, ExtendElement(value))
+ } else {
+ self.truncate(new_len);
+ Ok(())
+ }
+ }
+
+ /// Clones and appends all elements in a slice to the `Vec`.
+ ///
+ /// Iterates over the slice `other`, clones each element, and then appends
+ /// it to this `Vec`. The `other` slice is traversed in-order.
+ ///
+ /// Note that this function is the same as [`extend`] except that it is
+ /// specialized to work with slices instead. If and when Rust gets
+ /// specialization this function will likely be deprecated (but still
+ /// available).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.extend_from_slice(&[2, 3, 4]);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// [`extend`]: Vec::extend
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
+ pub fn extend_from_slice(&mut self, other: &[T]) {
+ self.spec_extend(other.iter())
+ }
+
+ /// Tries to clone and append all elements in a slice to the `Vec`.
+ ///
+ /// Iterates over the slice `other`, clones each element, and then appends
+ /// it to this `Vec`. The `other` slice is traversed in-order.
+ ///
+ /// Note that this function is the same as [`extend`] except that it is
+ /// specialized to work with slices instead. If and when Rust gets
+ /// specialization this function will likely be deprecated (but still
+ /// available).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.try_extend_from_slice(&[2, 3, 4]).unwrap();
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// [`extend`]: Vec::extend
+ #[stable(feature = "kernel", since = "1.0.0")]
+ pub fn try_extend_from_slice(&mut self, other: &[T]) -> Result<(), TryReserveError> {
+ self.try_spec_extend(other.iter())
+ }
+
+ /// Copies elements from `src` range to the end of the vector.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![0, 1, 2, 3, 4];
+ ///
+ /// vec.extend_from_within(2..);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4]);
+ ///
+ /// vec.extend_from_within(..2);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1]);
+ ///
+ /// vec.extend_from_within(4..8);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1, 4, 2, 3, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_extend_from_within", since = "1.53.0")]
+ pub fn extend_from_within<R>(&mut self, src: R)
+ where
+ R: RangeBounds<usize>,
+ {
+ let range = slice::range(src, ..self.len());
+ self.reserve(range.len());
+
+ // SAFETY:
+ // - `slice::range` guarantees that the given range is valid for indexing self
+ unsafe {
+ self.spec_extend_from_within(range);
+ }
+ }
+}
+
+impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
+ /// Takes a `Vec<[T; N]>` and flattens it into a `Vec<T>`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the length of the resulting vector would overflow a `usize`.
+ ///
+ /// This is only possible when flattening a vector of arrays of zero-sized
+ /// types, and thus tends to be irrelevant in practice. If
+ /// `size_of::<T>() > 0`, this will never panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_flatten)]
+ ///
+ /// let mut vec = vec![[1, 2, 3], [4, 5, 6], [7, 8, 9]];
+ /// assert_eq!(vec.pop(), Some([7, 8, 9]));
+ ///
+ /// let mut flattened = vec.into_flattened();
+ /// assert_eq!(flattened.pop(), Some(6));
+ /// ```
+ #[unstable(feature = "slice_flatten", issue = "95629")]
+ pub fn into_flattened(self) -> Vec<T, A> {
+ let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
+ let (new_len, new_cap) = if mem::size_of::<T>() == 0 {
+ (len.checked_mul(N).expect("vec len overflow"), usize::MAX)
+ } else {
+ // SAFETY:
+ // - `cap * N` cannot overflow because the allocation is already in
+ // the address space.
+ // - Each `[T; N]` has `N` valid elements, so there are `len * N`
+ // valid elements in the allocation.
+ unsafe { (len.unchecked_mul(N), cap.unchecked_mul(N)) }
+ };
+ // SAFETY:
+ // - `ptr` was allocated by `self`
+ // - `ptr` is well-aligned because `[T; N]` has the same alignment as `T`.
+ // - `new_cap` refers to the same sized allocation as `cap` because
+ // `new_cap * size_of::<T>()` == `cap * size_of::<[T; N]>()`
+ // - `len` <= `cap`, so `len * N` <= `cap * N`.
+ unsafe { Vec::<T, A>::from_raw_parts_in(ptr.cast(), new_len, new_cap, alloc) }
+ }
+}
+
+// This code generalizes `extend_with_{element,default}`.
+trait ExtendWith<T> {
+ fn next(&mut self) -> T;
+ fn last(self) -> T;
+}
+
+struct ExtendElement<T>(T);
+impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
+ fn next(&mut self) -> T {
+ self.0.clone()
+ }
+ fn last(self) -> T {
+ self.0
+ }
+}
+
+struct ExtendFunc<F>(F);
+impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
+ fn next(&mut self) -> T {
+ (self.0)()
+ }
+ fn last(mut self) -> T {
+ (self.0)()
+ }
+}
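+
+// Illustrative sketch (not part of the upstream code): `ExtendElement` backs
+// `Vec::resize`, which clones one value into every new slot, while
+// `ExtendFunc` backs `Vec::resize_with`, which calls a closure per new slot.
+// Assuming a plain `Vec<u8>`:
+//
+//     let mut v = vec![0u8];
+//     v.resize(3, 7);                      // extend_with(2, ExtendElement(7))  -> [0, 7, 7]
+//     v.resize_with(5, Default::default);  // extend_with(2, ExtendFunc(..))    -> [0, 7, 7, 0, 0]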
+
+impl<T, A: Allocator> Vec<T, A> {
+ #[cfg(not(no_global_oom_handling))]
+ /// Extend the vector by `n` values, using the given generator.
+ fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+ self.reserve(n);
+
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ // Use SetLenOnDrop to work around an aliasing bug: the compiler
+ // might not realize that the stores through `ptr` and the length
+ // update through `self.set_len()` don't alias.
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+ // Write all elements except the last one
+ for _ in 1..n {
+ ptr::write(ptr, value.next());
+ ptr = ptr.offset(1);
+ // Increment the length in every step in case next() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value.last());
+ local_len.increment_len(1);
+ }
+
+ // len set by scope guard
+ }
+ }
+
+ /// Try to extend the vector by `n` values, using the given generator.
+ fn try_extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) -> Result<(), TryReserveError> {
+ self.try_reserve(n)?;
+
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ // Use SetLenOnDrop to work around an aliasing bug: the compiler
+ // might not realize that the stores through `ptr` and the length
+ // update through `self.set_len()` don't alias.
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+ // Write all elements except the last one
+ for _ in 1..n {
+ ptr::write(ptr, value.next());
+ ptr = ptr.offset(1);
+ // Increment the length in every step in case next() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value.last());
+ local_len.increment_len(1);
+ }
+
+ // len set by scope guard
+ Ok(())
+ }
+ }
+}
+
+impl<T: PartialEq, A: Allocator> Vec<T, A> {
+ /// Removes consecutive repeated elements in the vector according to the
+ /// [`PartialEq`] trait implementation.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 2, 3, 2];
+ ///
+ /// vec.dedup();
+ ///
+ /// assert_eq!(vec, [1, 2, 3, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn dedup(&mut self) {
+ self.dedup_by(|a, b| a == b)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Internal methods and functions
+////////////////////////////////////////////////////////////////////////////////
+
+#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
+ <T as SpecFromElem>::from_elem(elem, n, Global)
+}
+
+#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
+ <T as SpecFromElem>::from_elem(elem, n, alloc)
+}
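+
+// Illustrative sketch (not upstream code): `from_elem` is the support routine
+// behind the `vec![elem; n]` form of the macro, so, assuming a `Vec<u8>`:
+//
+//     let v = vec![0u8; 4];          // roughly alloc::vec::from_elem(0u8, 4)
+//     assert_eq!(v, [0, 0, 0, 0]);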
+
+trait ExtendFromWithinSpec {
+ /// # Safety
+ ///
+ /// - `src` needs to be valid index
+ /// - `self.capacity() - self.len()` must be `>= src.len()`
+ unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
+}
+
+impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
+ default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
+ // SAFETY:
+ // - len is increased only after initializing elements
+ let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
+
+ // SAFETY:
+ // - caller guarantees that `src` is a valid index
+ let to_clone = unsafe { this.get_unchecked(src) };
+
+ iter::zip(to_clone, spare)
+ .map(|(src, dst)| dst.write(src.clone()))
+ // Note:
+ // - Element was just initialized with `MaybeUninit::write`, so it's ok to increase len
+ // - len is increased after each element to prevent leaks (see issue #82533)
+ .for_each(|_| *len += 1);
+ }
+}
+
+impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
+ unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
+ let count = src.len();
+ {
+ let (init, spare) = self.split_at_spare_mut();
+
+ // SAFETY:
+ // - caller guarantees that `src` is a valid index
+ let source = unsafe { init.get_unchecked(src) };
+
+ // SAFETY:
+ // - Both pointers are created from unique slice references (`&mut [_]`)
+ // so they are valid and do not overlap.
+ // - Elements are `Copy`, so it's OK to copy them without doing
+ // anything with the original values
+ // - `count` is equal to the len of `source`, so source is valid for
+ // `count` reads
+ // - `.reserve(count)` guarantees that `spare.len() >= count` so spare
+ // is valid for `count` writes
+ unsafe { ptr::copy_nonoverlapping(source.as_ptr(), spare.as_mut_ptr() as _, count) };
+ }
+
+ // SAFETY:
+ // - The elements were just initialized by `copy_nonoverlapping`
+ self.len += count;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Common trait implementations for Vec
+////////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ops::Deref for Vec<T, A> {
+ type Target = [T];
+
+ fn deref(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
+ fn deref_mut(&mut self) -> &mut [T] {
+ unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+trait SpecCloneFrom {
+ fn clone_from(this: &mut Self, other: &Self);
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> {
+ default fn clone_from(this: &mut Self, other: &Self) {
+ // drop anything that will not be overwritten
+ this.truncate(other.len());
+
+ // self.len <= other.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = other.split_at(this.len());
+
+ // reuse the contained values' allocations/resources.
+ this.clone_from_slice(init);
+ this.extend_from_slice(tail);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> {
+ fn clone_from(this: &mut Self, other: &Self) {
+ this.clear();
+ this.extend_from_slice(other);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
+ #[cfg(not(test))]
+ fn clone(&self) -> Self {
+ let alloc = self.allocator().clone();
+ <[T]>::to_vec_in(&**self, alloc)
+ }
+
+ // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
+ // required for this method definition, is not available. Instead use the
+ // `slice::to_vec` function which is only available with cfg(test)
+ // NB see the slice::hack module in slice.rs for more information
+ #[cfg(test)]
+ fn clone(&self) -> Self {
+ let alloc = self.allocator().clone();
+ crate::slice::to_vec(&**self, alloc)
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ SpecCloneFrom::clone_from(self, other)
+ }
+}
+
+/// The hash of a vector is the same as that of the corresponding slice,
+/// as required by the `core::borrow::Borrow` implementation.
+///
+/// ```
+/// #![feature(build_hasher_simple_hash_one)]
+/// use std::hash::BuildHasher;
+///
+/// let b = std::collections::hash_map::RandomState::new();
+/// let v: Vec<u8> = vec![0xa8, 0x3c, 0x09];
+/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
+/// assert_eq!(b.hash_one(v), b.hash_one(s));
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, A: Allocator> Hash for Vec<T, A> {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ Hash::hash(&**self, state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "vector indices are of type `usize` or ranges of `usize`",
+ label = "vector indices are of type `usize` or ranges of `usize`"
+)]
+impl<T, I: SliceIndex<[T]>, A: Allocator> Index<I> for Vec<T, A> {
+ type Output = I::Output;
+
+ #[inline]
+ fn index(&self, index: I) -> &Self::Output {
+ Index::index(&**self, index)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "vector indices are of type `usize` or ranges of `usize`",
+ label = "vector indices are of type `usize` or ranges of `usize`"
+)]
+impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut Self::Output {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> FromIterator<T> for Vec<T> {
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
+ <Self as SpecFromIter<T, I::IntoIter>>::from_iter(iter.into_iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> IntoIterator for Vec<T, A> {
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the vector (from start to end). The vector cannot be used after calling
+ /// this.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec!["a".to_string(), "b".to_string()];
+ /// for s in v.into_iter() {
+ /// // s has type String, not &String
+ /// println!("{s}");
+ /// }
+ /// ```
+ #[inline]
+ fn into_iter(self) -> IntoIter<T, A> {
+ unsafe {
+ let mut me = ManuallyDrop::new(self);
+ let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
+ let begin = me.as_mut_ptr();
+ let end = if mem::size_of::<T>() == 0 {
+ arith_offset(begin as *const i8, me.len() as isize) as *const T
+ } else {
+ begin.add(me.len()) as *const T
+ };
+ let cap = me.buf.capacity();
+ IntoIter {
+ buf: NonNull::new_unchecked(begin),
+ phantom: PhantomData,
+ cap,
+ alloc,
+ ptr: begin,
+ end,
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+
+ fn into_iter(self) -> slice::Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
+ type Item = &'a mut T;
+ type IntoIter = slice::IterMut<'a, T>;
+
+ fn into_iter(self) -> slice::IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Extend<T> for Vec<T, A> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ // leaf method to which various SpecFrom/SpecExtend implementations delegate when
+ // they have no further optimizations to apply
+ #[cfg(not(no_global_oom_handling))]
+ fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) {
+ // This is the case for a general iterator.
+ //
+ // This function should be the moral equivalent of:
+ //
+ // for item in iterator {
+ // self.push(item);
+ // }
+ while let Some(element) = iterator.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iterator.size_hint();
+ self.reserve(lower.saturating_add(1));
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), element);
+ // Since next() executes user code which can panic we have to bump the length
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+ }
+
+ // leaf method to which various SpecFrom/SpecExtend implementations delegate when
+ // they have no further optimizations to apply
+ fn try_extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) -> Result<(), TryReserveError> {
+ // This is the case for a general iterator.
+ //
+ // This function should be the moral equivalent of:
+ //
+ // for item in iterator {
+ // self.push(item);
+ // }
+ while let Some(element) = iterator.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iterator.size_hint();
+ self.try_reserve(lower.saturating_add(1))?;
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), element);
+ // Since next() executes user code which can panic we have to bump the length
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Creates a splicing iterator that replaces the specified range in the vector
+ /// with the given `replace_with` iterator and yields the removed items.
+ /// `replace_with` does not need to be the same length as `range`.
+ ///
+ /// `range` is removed even if the iterator is not consumed until the end.
+ ///
+ /// It is unspecified how many elements are removed from the vector
+ /// if the `Splice` value is leaked.
+ ///
+ /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped.
+ ///
+ /// This is optimal if:
+ ///
+ /// * The tail (elements in the vector after `range`) is empty,
+ /// * or `replace_with` yields fewer or equal elements than `range`’s length
+ /// * or the lower bound of its `size_hint()` is exact.
+ ///
+ /// Otherwise, a temporary vector is allocated and the tail is moved twice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3, 4];
+ /// let new = [7, 8, 9];
+ /// let u: Vec<_> = v.splice(1..3, new).collect();
+ /// assert_eq!(v, &[1, 7, 8, 9, 4]);
+ /// assert_eq!(u, &[2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "vec_splice", since = "1.21.0")]
+ pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, A>
+ where
+ R: RangeBounds<usize>,
+ I: IntoIterator<Item = T>,
+ {
+ Splice { drain: self.drain(range), replace_with: replace_with.into_iter() }
+ }
+
+ /// Creates an iterator which uses a closure to determine if an element should be removed.
+ ///
+ /// If the closure returns true, then the element is removed and yielded.
+ /// If the closure returns false, the element will remain in the vector and will not be yielded
+ /// by the iterator.
+ ///
+ /// Using this method is equivalent to the following code:
+ ///
+ /// ```
+ /// # let some_predicate = |x: &mut i32| { *x == 2 || *x == 3 || *x == 6 };
+ /// # let mut vec = vec![1, 2, 3, 4, 5, 6];
+ /// let mut i = 0;
+ /// while i < vec.len() {
+ /// if some_predicate(&mut vec[i]) {
+ /// let val = vec.remove(i);
+ /// // your code here
+ /// } else {
+ /// i += 1;
+ /// }
+ /// }
+ ///
+ /// # assert_eq!(vec, vec![1, 4, 5]);
+ /// ```
+ ///
+ /// But `drain_filter` is easier to use. `drain_filter` is also more efficient,
+ /// because it can backshift the elements of the array in bulk.
+ ///
+ /// Note that `drain_filter` also lets you mutate every element in the filter closure,
+ /// regardless of whether you choose to keep or remove it.
+ ///
+ /// # Examples
+ ///
+ /// Splitting an array into evens and odds, reusing the original allocation:
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// let mut numbers = vec![1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15];
+ ///
+ /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ /// let odds = numbers;
+ ///
+ /// assert_eq!(evens, vec![2, 4, 6, 8, 14]);
+ /// assert_eq!(odds, vec![1, 3, 5, 9, 11, 13, 15]);
+ /// ```
+ #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+ pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let old_len = self.len();
+
+ // Guard against us getting leaked (leak amplification)
+ unsafe {
+ self.set_len(0);
+ }
+
+ DrainFilter { vec: self, idx: 0, del: 0, old_len, pred: filter, panic_flag: false }
+ }
+}
+
+/// Extend implementation that copies elements out of references before pushing them onto the Vec.
+///
+/// This implementation is specialized for slice iterators, where it uses [`copy_from_slice`] to
+/// append the entire slice at once.
+///
+/// [`copy_from_slice`]: slice::copy_from_slice
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.spec_extend(iter.into_iter())
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
+
+/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
+ fn drop(&mut self) {
+ unsafe {
+ // use drop for [T]
+ // use a raw slice to refer to the elements of the vector with the weakest necessary type;
+ // this can avoid questions of validity in certain cases
+ ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), self.len))
+ }
+ // RawVec handles deallocation
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for Vec<T> {
+ /// Creates an empty `Vec<T>`.
+ fn default() -> Vec<T> {
+ Vec::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Vec<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> AsRef<Vec<T, A>> for Vec<T, A> {
+ fn as_ref(&self) -> &Vec<T, A> {
+ self
+ }
+}
+
+#[stable(feature = "vec_as_mut", since = "1.5.0")]
+impl<T, A: Allocator> AsMut<Vec<T, A>> for Vec<T, A> {
+ fn as_mut(&mut self) -> &mut Vec<T, A> {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> AsRef<[T]> for Vec<T, A> {
+ fn as_ref(&self) -> &[T] {
+ self
+ }
+}
+
+#[stable(feature = "vec_as_mut", since = "1.5.0")]
+impl<T, A: Allocator> AsMut<[T]> for Vec<T, A> {
+ fn as_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> From<&[T]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&[1, 2, 3][..]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: &[T]) -> Vec<T> {
+ s.to_vec()
+ }
+ #[cfg(test)]
+ fn from(s: &[T]) -> Vec<T> {
+ crate::slice::to_vec(s, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_mut", since = "1.19.0")]
+impl<T: Clone> From<&mut [T]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&mut [1, 2, 3][..]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: &mut [T]) -> Vec<T> {
+ s.to_vec()
+ }
+ #[cfg(test)]
+ fn from(s: &mut [T]) -> Vec<T> {
+ crate::slice::to_vec(s, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_array", since = "1.44.0")]
+impl<T, const N: usize> From<[T; N]> for Vec<T> {
+ /// Allocate a `Vec<T>` and move `s`'s items into it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from([1, 2, 3]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: [T; N]) -> Vec<T> {
+ <[T]>::into_vec(box s)
+ }
+
+ #[cfg(test)]
+ fn from(s: [T; N]) -> Vec<T> {
+ crate::slice::into_vec(box s)
+ }
+}
+
+#[stable(feature = "vec_from_cow_slice", since = "1.14.0")]
+impl<'a, T> From<Cow<'a, [T]>> for Vec<T>
+where
+ [T]: ToOwned<Owned = Vec<T>>,
+{
+ /// Convert a clone-on-write slice into a vector.
+ ///
+ /// If `s` already owns a `Vec<T>`, it will be returned directly.
+ /// If `s` is borrowing a slice, a new `Vec<T>` will be allocated and
+ /// filled by cloning `s`'s items into it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let o: Cow<[i32]> = Cow::Owned(vec![1, 2, 3]);
+ /// let b: Cow<[i32]> = Cow::Borrowed(&[1, 2, 3]);
+ /// assert_eq!(Vec::from(o), Vec::from(b));
+ /// ```
+ fn from(s: Cow<'a, [T]>) -> Vec<T> {
+ s.into_owned()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "vec_from_box", since = "1.18.0")]
+impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
+ /// Convert a boxed slice into a vector by transferring ownership of
+ /// the existing heap allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let b: Box<[i32]> = vec![1, 2, 3].into_boxed_slice();
+ /// assert_eq!(Vec::from(b), vec![1, 2, 3]);
+ /// ```
+ fn from(s: Box<[T], A>) -> Self {
+ s.into_vec()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(no_global_oom_handling))]
+#[cfg(not(test))]
+#[stable(feature = "box_from_vec", since = "1.20.0")]
+impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
+ /// Convert a vector into a boxed slice.
+ ///
+ /// If `v` has excess capacity, its items will be moved into a
+ /// newly-allocated buffer with exactly the right capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
+ /// ```
+ fn from(v: Vec<T, A>) -> Self {
+ v.into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<&str> for Vec<u8> {
+ /// Allocate a `Vec<u8>` and fill it with a UTF-8 string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from("123"), vec![b'1', b'2', b'3']);
+ /// ```
+ fn from(s: &str) -> Vec<u8> {
+ From::from(s.as_bytes())
+ }
+}
+
+#[stable(feature = "array_try_from_vec", since = "1.48.0")]
+impl<T, A: Allocator, const N: usize> TryFrom<Vec<T, A>> for [T; N] {
+ type Error = Vec<T, A>;
+
+ /// Gets the entire contents of the `Vec<T>` as an array,
+ /// if its size exactly matches that of the requested array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
+ /// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
+ /// ```
+ ///
+ /// If the length doesn't match, the input comes back in `Err`:
+ /// ```
+ /// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
+ /// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
+ /// ```
+ ///
+ /// If you're fine with just getting a prefix of the `Vec<T>`,
+ /// you can call [`.truncate(N)`](Vec::truncate) first.
+ /// ```
+ /// let mut v = String::from("hello world").into_bytes();
+ /// v.sort();
+ /// v.truncate(2);
+ /// let [a, b]: [_; 2] = v.try_into().unwrap();
+ /// assert_eq!(a, b' ');
+ /// assert_eq!(b, b'd');
+ /// ```
+ fn try_from(mut vec: Vec<T, A>) -> Result<[T; N], Vec<T, A>> {
+ if vec.len() != N {
+ return Err(vec);
+ }
+
+ // SAFETY: `.set_len(0)` is always sound.
+ unsafe { vec.set_len(0) };
+
+ // SAFETY: A `Vec`'s pointer is always aligned properly, and
+ // the alignment the array needs is the same as the items.
+ // We checked earlier that we have sufficient items.
+ // The items will not double-drop as the `set_len`
+ // tells the `Vec` not to also drop them.
+ let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
+ Ok(array)
+ }
+}
diff --git a/rust/alloc/vec/partial_eq.rs b/rust/alloc/vec/partial_eq.rs
new file mode 100644
index 000000000000..10ad4e492287
--- /dev/null
+++ b/rust/alloc/vec/partial_eq.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::Allocator;
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::Cow;
+
+use super::Vec;
+
+macro_rules! __impl_slice_eq1 {
+ ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => {
+ #[$stability]
+ impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
+ where
+ T: PartialEq<U>,
+ $($ty: $bound)?
+ {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
+ #[inline]
+ fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
+ }
+ }
+}
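+
+// Illustrative sketch (not upstream code): an invocation such as
+// `__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[...] }` expands to
+// roughly:
+//
+//     impl<T, U, A: Allocator> PartialEq<&[U]> for Vec<T, A>
+//     where
+//         T: PartialEq<U>,
+//     {
+//         fn eq(&self, other: &&[U]) -> bool { self[..] == other[..] }
+//         fn ne(&self, other: &&[U]) -> bool { self[..] != other[..] }
+//     }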
+
+__impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
+__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] }
+
+// NOTE: some less important impls are omitted to reduce code bloat
+// FIXME(Centril): Reconsider this?
+//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
+//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
diff --git a/rust/alloc/vec/set_len_on_drop.rs b/rust/alloc/vec/set_len_on_drop.rs
new file mode 100644
index 000000000000..448bf5076a0b
--- /dev/null
+++ b/rust/alloc/vec/set_len_on_drop.rs
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+//
+// The idea is: The length field in SetLenOnDrop is a local variable
+// that the optimizer will see does not alias with any stores through the Vec's data
+// pointer. This is a workaround for alias analysis issue #32155
+pub(super) struct SetLenOnDrop<'a> {
+ len: &'a mut usize,
+ local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+ #[inline]
+ pub(super) fn new(len: &'a mut usize) -> Self {
+ SetLenOnDrop { local_len: *len, len }
+ }
+
+ #[inline]
+ pub(super) fn increment_len(&mut self, increment: usize) {
+ self.local_len += increment;
+ }
+}
+
+impl Drop for SetLenOnDrop<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ *self.len = self.local_len;
+ }
+}
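+
+// Illustrative sketch (not upstream code): a typical caller, such as
+// `extend_with` in vec/mod.rs, keeps the guard alive across the raw writes so
+// that a panic in user code still leaves `len` covering only the initialized
+// prefix:
+//
+//     let mut local_len = SetLenOnDrop::new(&mut self.len);
+//     ptr::write(ptr, value.next());
+//     local_len.increment_len(1);
+//     // `*len` is written back when `local_len` drops, even during unwinding.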
diff --git a/rust/alloc/vec/spec_extend.rs b/rust/alloc/vec/spec_extend.rs
new file mode 100644
index 000000000000..5ce2d00991bc
--- /dev/null
+++ b/rust/alloc/vec/spec_extend.rs
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+use crate::alloc::Allocator;
+use crate::collections::{TryReserveError, TryReserveErrorKind};
+use core::iter::TrustedLen;
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{IntoIter, SetLenOnDrop, Vec};
+
+// Specialization trait used for Vec::extend
+#[cfg(not(no_global_oom_handling))]
+pub(super) trait SpecExtend<T, I> {
+ fn spec_extend(&mut self, iter: I);
+}
+
+// Specialization trait used for Vec::try_extend
+pub(super) trait TrySpecExtend<T, I> {
+ fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter)
+ }
+}
+
+impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> {
+ self.try_extend_desugared(iter)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.offset(1);
+ // Since the loop executes user code which can panic, we have to bump the pointer
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.try_reserve(additional)?;
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.offset(1);
+ // Since the loop executes user code which can panic, we have to bump the pointer
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ Ok(())
+ } else {
+ Err(TryReserveErrorKind::CapacityOverflow.into())
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
+ unsafe {
+ self.append_elements(iterator.as_slice() as _);
+ }
+ iterator.forget_remaining_elements();
+ }
+}
+
+impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn try_spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserveError> {
+ unsafe {
+ self.try_append_elements(iterator.as_slice() as _)?;
+ }
+ iterator.forget_remaining_elements();
+ Ok(())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ self.spec_extend(iterator.cloned())
+ }
+}
+
+impl<'a, T: 'a, I, A: Allocator + 'a> TrySpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
+ self.try_spec_extend(iterator.cloned())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ let slice = iterator.as_slice();
+ unsafe { self.append_elements(slice) };
+ }
+}
+
+impl<'a, T: 'a, A: Allocator + 'a> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn try_spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> {
+ let slice = iterator.as_slice();
+ unsafe { self.try_append_elements(slice) }
+ }
+}
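Which of the specializations above is selected follows from the iterator type at the call site. A rough sketch, assuming a configuration where the infallible `extend` is available (i.e. without `no_global_oom_handling`):

    let mut v: Vec<u32> = Vec::new();

    // Plain `Iterator` (length not trusted): falls back to `extend_desugared`.
    v.extend((0..10).filter(|x| x % 2 == 0));

    // `TrustedLen` iterator: reserves the exact capacity once, then writes in place.
    v.extend(0..3);

    // `slice::Iter` over a `Copy` type: lowers to `append_elements`, i.e. a memcpy.
    v.extend([1, 2, 3].iter());

    // `vec::IntoIter`: the remaining elements are appended and then forgotten.
    v.extend(Vec::from([4, 5, 6]).into_iter());

The fallible counterparts go through `try_spec_extend` in the same way, returning `TryReserveError` instead of aborting on allocation failure.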
diff --git a/rust/bindgen_parameters b/rust/bindgen_parameters
new file mode 100644
index 000000000000..be4963bf7203
--- /dev/null
+++ b/rust/bindgen_parameters
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0
+
+--opaque-type xregs_state
+--opaque-type desc_struct
+--opaque-type arch_lbr_state
+--opaque-type local_apic
+
+# A packed type cannot transitively contain a `#[repr(align)]` type.
+--opaque-type x86_msi_data
+--opaque-type x86_msi_addr_lo
+
+# `try` is a reserved keyword since Rust 2018; solved in `bindgen` v0.59.2,
+# commit 2aed6b021680 ("context: Escape the try keyword properly").
+--opaque-type kunit_try_catch
+
+# If SMP is disabled, `arch_spinlock_t` is defined as a ZST which triggers a Rust
+# warning. We don't need to peek into it anyway.
+--opaque-type spinlock
+
+# `seccomp`'s comment gets understood as a doctest
+--no-doc-comments
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
new file mode 100644
index 000000000000..284793085d55
--- /dev/null
+++ b/rust/bindings/bindings_helper.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header that contains the code (mostly headers) for which Rust bindings
+ * will be automatically generated by `bindgen`.
+ *
+ * Sorted alphabetically.
+ */
+
+#include <kunit/test.h>
+#include <linux/amba/bus.h>
+#include <linux/cdev.h>
+#include <linux/clk.h>
+#include <linux/errname.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fs_parser.h>
+#include <linux/gpio/driver.h>
+#include <linux/hw_random.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/random.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/sysctl.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <uapi/linux/android/binder.h>
+
+/* `bindgen` gets confused at certain things. */
+const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
+const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO;
+const __poll_t BINDINGS_EPOLLIN = EPOLLIN;
+const __poll_t BINDINGS_EPOLLOUT = EPOLLOUT;
+const __poll_t BINDINGS_EPOLLERR = EPOLLERR;
+const __poll_t BINDINGS_EPOLLHUP = EPOLLHUP;
+
+const loff_t BINDINGS_MAX_LFS_FILESIZE = MAX_LFS_FILESIZE;
diff --git a/rust/bindings/lib.rs b/rust/bindings/lib.rs
new file mode 100644
index 000000000000..7fdd74e68b60
--- /dev/null
+++ b/rust/bindings/lib.rs
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Bindings.
+//!
+//! Imports the generated bindings by `bindgen`.
+//!
+//! This crate must not be used directly. If you need a kernel C API that is
+//! not yet ported or wrapped in the `kernel` crate, then port or wrap it there
+//! first instead of using this crate.
+
+#![no_std]
+#![feature(core_ffi_c)]
+// See <https://github.com/rust-lang/rust-bindgen/issues/1651>.
+#![cfg_attr(test, allow(deref_nullptr))]
+#![cfg_attr(test, allow(unaligned_references))]
+#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
+#![allow(
+ clippy::all,
+ missing_docs,
+ non_camel_case_types,
+ non_upper_case_globals,
+ non_snake_case,
+ improper_ctypes,
+ unreachable_pub,
+ unsafe_op_in_unsafe_fn
+)]
+
+mod bindings_raw {
+ // Use glob import here to expose all helpers.
+ // Symbols defined within the module will take precedence over the glob import.
+ pub use super::bindings_helper::*;
+ include!(concat!(
+ env!("OBJTREE"),
+ "/rust/bindings/bindings_generated.rs"
+ ));
+}
+
+// When both a directly exposed symbol and a helper exist for the same function,
+// the directly exposed symbol is preferred and the helper becomes dead code, so
+// ignore the warning here.
+#[allow(dead_code)]
+mod bindings_helper {
+ // Import the generated bindings for types.
+ use super::bindings_raw::*;
+ include!(concat!(
+ env!("OBJTREE"),
+ "/rust/bindings/bindings_helpers_generated.rs"
+ ));
+}
+
+pub use bindings_raw::*;
+
+pub const GFP_KERNEL: gfp_t = BINDINGS_GFP_KERNEL;
+pub const __GFP_ZERO: gfp_t = BINDINGS___GFP_ZERO;
+pub const __GFP_HIGHMEM: gfp_t = ___GFP_HIGHMEM;
+
+pub const MAX_LFS_FILESIZE: loff_t = BINDINGS_MAX_LFS_FILESIZE;
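The glob-import precedence that `bindings_raw` relies on (a locally defined item shadows a glob-imported one of the same name) can be shown with a tiny standalone sketch, unrelated to the actual generated bindings:

    mod generated {
        pub fn foo() -> i32 { 1 }
        pub fn bar() -> i32 { 2 }
    }

    mod raw {
        // Glob import everything...
        pub use super::generated::*;
        // ...but an item defined here takes precedence over the glob-imported one.
        pub fn foo() -> i32 { 10 }
    }

    fn demo() {
        assert_eq!(raw::foo(), 10); // The local definition wins.
        assert_eq!(raw::bar(), 2);  // Everything else comes from the glob import.
    }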
diff --git a/rust/build_error.rs b/rust/build_error.rs
new file mode 100644
index 000000000000..bbd3a05440b2
--- /dev/null
+++ b/rust/build_error.rs
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Build-time error.
+//!
+//! This crate provides a function `build_error`, which will panic at
+//! compile time if executed in const context, and will cause a build error
+//! if not executed at compile time and the optimizer does not optimize away
+//! the call.
+//!
+//! It is used by `build_assert!` in the kernel crate to check conditions that
+//! could be checked statically but cannot yet be enforced in Rust (e.g. some
+//! checks can be done in const functions, but those functions could still be
+//! called at runtime).
+
+#![no_std]
+
+/// Panics if executed in const context, or triggers a build error if not.
+#[inline(never)]
+#[cold]
+#[export_name = "rust_build_error"]
+#[track_caller]
+pub const fn build_error(msg: &'static str) -> ! {
+ panic!("{}", msg);
+}
+
+#[cfg(CONFIG_RUST_BUILD_ASSERT_WARN)]
+#[link_section = ".gnu.warning.rust_build_error"]
+#[used]
+static BUILD_ERROR_WARNING: [u8; 45] = *b"call to build_error present after compilation";
diff --git a/rust/compiler_builtins.rs b/rust/compiler_builtins.rs
new file mode 100644
index 000000000000..e57011b7c3da
--- /dev/null
+++ b/rust/compiler_builtins.rs
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Our own `compiler_builtins`.
+//!
+//! Rust provides [`compiler_builtins`] as a port of LLVM's [`compiler-rt`].
+//! Since we do not need the vast majority of them, we avoid the dependency
+//! by providing this file.
+//!
+//! At the moment, some builtins are required that should not be. For instance,
+//! [`core`] has 128-bit integer functionality which we should not be compiling
+//! in. We will work with upstream [`core`] to provide feature flags to disable
+//! the parts we do not need. For the moment, we define them to [`panic!`] at
+//! runtime for simplicity to catch mistakes, instead of performing surgery
+//! on `core.o`.
+//!
+//! In any case, all these symbols are weakened to ensure we do not override
+//! those that may be provided by the rest of the kernel.
+//!
+//! [`compiler_builtins`]: https://github.com/rust-lang/compiler-builtins
+//! [`compiler-rt`]: https://compiler-rt.llvm.org/
+
+#![feature(compiler_builtins)]
+#![compiler_builtins]
+#![no_builtins]
+#![no_std]
+
+macro_rules! define_panicking_intrinsics(
+ ($reason: tt, { $($ident: ident, )* }) => {
+ $(
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern "C" fn $ident() {
+ panic!($reason);
+ }
+ )*
+ }
+);
+
+define_panicking_intrinsics!("`f32` should not be used", {
+ __eqsf2,
+ __gesf2,
+ __lesf2,
+ __nesf2,
+ __unordsf2,
+});
+
+define_panicking_intrinsics!("`f64` should not be used", {
+ __unorddf2,
+});
+
+define_panicking_intrinsics!("`i128` should not be used", {
+ __ashrti3,
+ __muloti4,
+ __multi3,
+});
+
+define_panicking_intrinsics!("`u128` should not be used", {
+ __ashlti3,
+ __lshrti3,
+ __udivmodti4,
+ __udivti3,
+ __umodti3,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`f32` should not be used", {
+ __aeabi_fcmpeq,
+ __aeabi_fcmpun,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`f64` should not be used", {
+ __aeabi_dcmpun,
+});
+
+#[cfg(target_arch = "arm")]
+define_panicking_intrinsics!("`u64` division/modulo should not be used", {
+ __aeabi_uldivmod,
+});
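For reference, the expansion of `define_panicking_intrinsics!` for a single symbol is simply the following (hand-expanded for illustration):

    #[doc(hidden)]
    #[no_mangle]
    pub extern "C" fn __eqsf2() {
        panic!("`f32` should not be used");
    }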
diff --git a/rust/exports.c b/rust/exports.c
new file mode 100644
index 000000000000..bb7cc64cecd0
--- /dev/null
+++ b/rust/exports.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A hack to export Rust symbols for loadable modules without having to redo
+ * the entire `include/linux/export.h` logic in Rust.
+ *
+ * This requires Rust's new/future `v0` mangling scheme because the default
+ * one ("legacy") uses invalid characters for C identifiers (thus we cannot use
+ * the `EXPORT_SYMBOL_*` macros).
+ *
+ * All symbols are exported as GPL-only to guarantee no GPL-only feature is
+ * accidentally exposed.
+ */
+
+#include <linux/module.h>
+
+#define EXPORT_SYMBOL_RUST_GPL(sym) extern int sym; EXPORT_SYMBOL_GPL(sym)
+
+#include "exports_core_generated.h"
+#include "exports_alloc_generated.h"
+#include "exports_bindings_generated.h"
+#include "exports_kernel_generated.h"
diff --git a/rust/helpers.c b/rust/helpers.c
new file mode 100644
index 000000000000..bf790f46c763
--- /dev/null
+++ b/rust/helpers.c
@@ -0,0 +1,679 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
+ * cannot be called either. This file explicitly creates functions ("helpers")
+ * that wrap those so that they can be called from Rust.
+ *
+ * Even though Rust kernel modules should never use the bindings directly, some
+ * of these helpers need to be exported because Rust generics and inlined
+ * functions may not get their code generated in the crate where they are
+ * defined. Other helpers, called from non-inline functions, may not be
+ * exported, in principle. However, in general, the Rust compiler does not
+ * guarantee codegen will be performed for a non-inline function either.
+ * Therefore, this file exports all the helpers. In the future, this may be
+ * revisited to reduce the number of exports after the compiler is informed
+ * about the places codegen is required.
+ *
+ * All symbols are exported as GPL-only to guarantee no GPL-only feature is
+ * accidentally exposed.
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/bug.h>
+#include <linux/build_bug.h>
+#include <linux/clk.h>
+#include <linux/errname.h>
+#include <linux/fs_parser.h>
+#include <linux/gfp.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/sched/signal.h>
+#include <linux/security.h>
+#include <linux/skbuff.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+
+__noreturn void rust_helper_BUG(void)
+{
+ BUG();
+}
+EXPORT_SYMBOL_GPL(rust_helper_BUG);
+
+void rust_helper_clk_disable_unprepare(struct clk *clk)
+{
+ return clk_disable_unprepare(clk);
+}
+EXPORT_SYMBOL_GPL(rust_helper_clk_disable_unprepare);
+
+int rust_helper_clk_prepare_enable(struct clk *clk)
+{
+ return clk_prepare_enable(clk);
+}
+EXPORT_SYMBOL_GPL(rust_helper_clk_prepare_enable);
+
+unsigned long rust_helper_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return copy_from_user(to, from, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_from_user);
+
+unsigned long rust_helper_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return copy_to_user(to, from, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_to_user);
+
+unsigned long rust_helper_clear_user(void __user *to, unsigned long n)
+{
+ return clear_user(to, n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_clear_user);
+
+void __iomem *rust_helper_ioremap(resource_size_t offset, unsigned long size)
+{
+ return ioremap(offset, size);
+}
+EXPORT_SYMBOL_GPL(rust_helper_ioremap);
+
+u8 rust_helper_readb(const volatile void __iomem *addr)
+{
+ return readb(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readb);
+
+u16 rust_helper_readw(const volatile void __iomem *addr)
+{
+ return readw(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readw);
+
+u32 rust_helper_readl(const volatile void __iomem *addr)
+{
+ return readl(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readl);
+
+#ifdef CONFIG_64BIT
+u64 rust_helper_readq(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readq);
+#endif
+
+void rust_helper_writeb(u8 value, volatile void __iomem *addr)
+{
+ writeb(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writeb);
+
+void rust_helper_writew(u16 value, volatile void __iomem *addr)
+{
+ writew(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writew);
+
+void rust_helper_writel(u32 value, volatile void __iomem *addr)
+{
+ writel(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writel);
+
+#ifdef CONFIG_64BIT
+void rust_helper_writeq(u64 value, volatile void __iomem *addr)
+{
+ writeq(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writeq);
+#endif
+
+u8 rust_helper_readb_relaxed(const volatile void __iomem *addr)
+{
+ return readb_relaxed(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readb_relaxed);
+
+u16 rust_helper_readw_relaxed(const volatile void __iomem *addr)
+{
+ return readw_relaxed(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readw_relaxed);
+
+u32 rust_helper_readl_relaxed(const volatile void __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readl_relaxed);
+
+#ifdef CONFIG_64BIT
+u64 rust_helper_readq_relaxed(const volatile void __iomem *addr)
+{
+ return readq_relaxed(addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_readq_relaxed);
+#endif
+
+void rust_helper_writeb_relaxed(u8 value, volatile void __iomem *addr)
+{
+ writeb_relaxed(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writeb_relaxed);
+
+void rust_helper_writew_relaxed(u16 value, volatile void __iomem *addr)
+{
+ writew_relaxed(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writew_relaxed);
+
+void rust_helper_writel_relaxed(u32 value, volatile void __iomem *addr)
+{
+ writel_relaxed(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writel_relaxed);
+
+#ifdef CONFIG_64BIT
+void rust_helper_writeq_relaxed(u64 value, volatile void __iomem *addr)
+{
+ writeq_relaxed(value, addr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_writeq_relaxed);
+#endif
+
+void rust_helper_memcpy_fromio(void *to, const volatile void __iomem *from, long count)
+{
+ memcpy_fromio(to, from, count);
+}
+EXPORT_SYMBOL_GPL(rust_helper_memcpy_fromio);
+
+void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ __spin_lock_init(lock, name, key);
+#else
+ spin_lock_init(lock);
+#endif
+}
+EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init);
+
+void rust_helper_spin_lock(spinlock_t *lock)
+{
+ spin_lock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_lock);
+
+void rust_helper_spin_unlock(spinlock_t *lock)
+{
+ spin_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);
+
+unsigned long rust_helper_spin_lock_irqsave(spinlock_t *lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+
+ return flags;
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_lock_irqsave);
+
+void rust_helper_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
+{
+ spin_unlock_irqrestore(lock, flags);
+}
+EXPORT_SYMBOL_GPL(rust_helper_spin_unlock_irqrestore);
+
+void rust_helper__raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+ struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_SPINLOCK
+ _raw_spin_lock_init(lock, name, key);
+#else
+ raw_spin_lock_init(lock);
+#endif
+}
+EXPORT_SYMBOL_GPL(rust_helper__raw_spin_lock_init);
+
+void rust_helper_raw_spin_lock(raw_spinlock_t *lock)
+{
+ raw_spin_lock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_raw_spin_lock);
+
+void rust_helper_raw_spin_unlock(raw_spinlock_t *lock)
+{
+ raw_spin_unlock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_raw_spin_unlock);
+
+unsigned long rust_helper_raw_spin_lock_irqsave(raw_spinlock_t *lock)
+{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(lock, flags);
+
+ return flags;
+}
+EXPORT_SYMBOL_GPL(rust_helper_raw_spin_lock_irqsave);
+
+void rust_helper_raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
+ unsigned long flags)
+{
+ raw_spin_unlock_irqrestore(lock, flags);
+}
+EXPORT_SYMBOL_GPL(rust_helper_raw_spin_unlock_irqrestore);
+
+void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
+{
+ init_wait(wq_entry);
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_wait);
+
+void rust_helper_init_waitqueue_func_entry(struct wait_queue_entry *wq_entry,
+ wait_queue_func_t func)
+{
+ init_waitqueue_func_entry(wq_entry, func);
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_waitqueue_func_entry);
+
+int rust_helper_signal_pending(struct task_struct *t)
+{
+ return signal_pending(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_signal_pending);
+
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+ return alloc_pages(gfp_mask, order);
+}
+EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
+
+void *rust_helper_kmap(struct page *page)
+{
+ return kmap(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kmap);
+
+void rust_helper_kunmap(struct page *page)
+{
+ return kunmap(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunmap);
+
+int rust_helper_cond_resched(void)
+{
+ return cond_resched();
+}
+EXPORT_SYMBOL_GPL(rust_helper_cond_resched);
+
+size_t rust_helper_copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_from_iter(addr, bytes, i);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_from_iter);
+
+size_t rust_helper_copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
+{
+ return copy_to_iter(addr, bytes, i);
+}
+EXPORT_SYMBOL_GPL(rust_helper_copy_to_iter);
+
+bool rust_helper_IS_ERR(__force const void *ptr)
+{
+ return IS_ERR(ptr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_IS_ERR);
+
+long rust_helper_PTR_ERR(__force const void *ptr)
+{
+ return PTR_ERR(ptr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);
+
+const char *rust_helper_errname(int err)
+{
+ return errname(err);
+}
+EXPORT_SYMBOL_GPL(rust_helper_errname);
+
+void rust_helper_mutex_lock(struct mutex *lock)
+{
+ mutex_lock(lock);
+}
+EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);
+
+void rust_helper_amba_set_drvdata(struct amba_device *dev, void *data)
+{
+ amba_set_drvdata(dev, data);
+}
+EXPORT_SYMBOL_GPL(rust_helper_amba_set_drvdata);
+
+void *rust_helper_amba_get_drvdata(struct amba_device *dev)
+{
+ return amba_get_drvdata(dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_amba_get_drvdata);
+
+void *
+rust_helper_platform_get_drvdata(const struct platform_device *pdev)
+{
+ return platform_get_drvdata(pdev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_platform_get_drvdata);
+
+void
+rust_helper_platform_set_drvdata(struct platform_device *pdev,
+ void *data)
+{
+ return platform_set_drvdata(pdev, data);
+}
+EXPORT_SYMBOL_GPL(rust_helper_platform_set_drvdata);
+
+refcount_t rust_helper_REFCOUNT_INIT(int n)
+{
+ return (refcount_t)REFCOUNT_INIT(n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT);
+
+void rust_helper_refcount_inc(refcount_t *r)
+{
+ refcount_inc(r);
+}
+EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);
+
+bool rust_helper_refcount_dec_and_test(refcount_t *r)
+{
+ return refcount_dec_and_test(r);
+}
+EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
+
+void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent,
+ struct rb_node **rb_link)
+{
+ rb_link_node(node, parent, rb_link);
+}
+EXPORT_SYMBOL_GPL(rust_helper_rb_link_node);
+
+struct task_struct *rust_helper_get_current(void)
+{
+ return current;
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_current);
+
+void rust_helper_get_task_struct(struct task_struct *t)
+{
+ get_task_struct(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
+
+void rust_helper_put_task_struct(struct task_struct *t)
+{
+ put_task_struct(t);
+}
+EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
+
+int rust_helper_security_binder_set_context_mgr(const struct cred *mgr)
+{
+ return security_binder_set_context_mgr(mgr);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_set_context_mgr);
+
+int rust_helper_security_binder_transaction(const struct cred *from,
+ const struct cred *to)
+{
+ return security_binder_transaction(from, to);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_transaction);
+
+int rust_helper_security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
+{
+ return security_binder_transfer_binder(from, to);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_transfer_binder);
+
+int rust_helper_security_binder_transfer_file(const struct cred *from,
+ const struct cred *to,
+ struct file *file)
+{
+ return security_binder_transfer_file(from, to, file);
+}
+EXPORT_SYMBOL_GPL(rust_helper_security_binder_transfer_file);
+
+struct file *rust_helper_get_file(struct file *f)
+{
+ return get_file(f);
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_file);
+
+void rust_helper_rcu_read_lock(void)
+{
+ rcu_read_lock();
+}
+EXPORT_SYMBOL_GPL(rust_helper_rcu_read_lock);
+
+void rust_helper_rcu_read_unlock(void)
+{
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(rust_helper_rcu_read_unlock);
+
+void rust_helper_synchronize_rcu(void)
+{
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(rust_helper_synchronize_rcu);
+
+void *rust_helper_dev_get_drvdata(struct device *dev)
+{
+ return dev_get_drvdata(dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dev_get_drvdata);
+
+const char *rust_helper_dev_name(const struct device *dev)
+{
+ return dev_name(dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dev_name);
+
+void rust_helper___seqcount_init(seqcount_t *s, const char *name,
+ struct lock_class_key *key)
+{
+ __seqcount_init(s, name, key);
+}
+EXPORT_SYMBOL_GPL(rust_helper___seqcount_init);
+
+unsigned rust_helper_read_seqcount_begin(seqcount_t *s)
+{
+ return read_seqcount_begin(s);
+}
+EXPORT_SYMBOL_GPL(rust_helper_read_seqcount_begin);
+
+int rust_helper_read_seqcount_retry(seqcount_t *s, unsigned start)
+{
+ return read_seqcount_retry(s, start);
+}
+EXPORT_SYMBOL_GPL(rust_helper_read_seqcount_retry);
+
+void rust_helper_write_seqcount_begin(seqcount_t *s)
+{
+ do_write_seqcount_begin(s);
+}
+EXPORT_SYMBOL_GPL(rust_helper_write_seqcount_begin);
+
+void rust_helper_write_seqcount_end(seqcount_t *s)
+{
+ do_write_seqcount_end(s);
+}
+EXPORT_SYMBOL_GPL(rust_helper_write_seqcount_end);
+
+void rust_helper_irq_set_handler_locked(struct irq_data *data,
+ irq_flow_handler_t handler)
+{
+ irq_set_handler_locked(data, handler);
+}
+EXPORT_SYMBOL_GPL(rust_helper_irq_set_handler_locked);
+
+void *rust_helper_irq_data_get_irq_chip_data(struct irq_data *d)
+{
+ return irq_data_get_irq_chip_data(d);
+}
+EXPORT_SYMBOL_GPL(rust_helper_irq_data_get_irq_chip_data);
+
+struct irq_chip *rust_helper_irq_desc_get_chip(struct irq_desc *desc)
+{
+ return irq_desc_get_chip(desc);
+}
+EXPORT_SYMBOL_GPL(rust_helper_irq_desc_get_chip);
+
+void *rust_helper_irq_desc_get_handler_data(struct irq_desc *desc)
+{
+ return irq_desc_get_handler_data(desc);
+}
+EXPORT_SYMBOL_GPL(rust_helper_irq_desc_get_handler_data);
+
+void rust_helper_chained_irq_enter(struct irq_chip *chip,
+ struct irq_desc *desc)
+{
+ chained_irq_enter(chip, desc);
+}
+EXPORT_SYMBOL_GPL(rust_helper_chained_irq_enter);
+
+void rust_helper_chained_irq_exit(struct irq_chip *chip,
+ struct irq_desc *desc)
+{
+ chained_irq_exit(chip, desc);
+}
+EXPORT_SYMBOL_GPL(rust_helper_chained_irq_exit);
+
+const struct cred *rust_helper_get_cred(const struct cred *cred)
+{
+ return get_cred(cred);
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_cred);
+
+void rust_helper_put_cred(const struct cred *cred)
+{
+ put_cred(cred);
+}
+EXPORT_SYMBOL_GPL(rust_helper_put_cred);
+
+const struct of_device_id *rust_helper_of_match_device(
+ const struct of_device_id *matches, const struct device *dev)
+{
+ return of_match_device(matches, dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_of_match_device);
+
+void rust_helper_init_completion(struct completion *c)
+{
+ init_completion(c);
+}
+EXPORT_SYMBOL_GPL(rust_helper_init_completion);
+
+struct sk_buff *rust_helper_skb_get(struct sk_buff *skb)
+{
+ return skb_get(skb);
+}
+EXPORT_SYMBOL_GPL(rust_helper_skb_get);
+
+unsigned int rust_helper_skb_headlen(const struct sk_buff *skb)
+{
+ return skb_headlen(skb);
+}
+EXPORT_SYMBOL_GPL(rust_helper_skb_headlen);
+
+void rust_helper_dev_hold(struct net_device *dev)
+{
+ return dev_hold(dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dev_hold);
+
+void rust_helper_dev_put(struct net_device *dev)
+{
+ return dev_put(dev);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dev_put);
+
+struct net *rust_helper_get_net(struct net *net)
+{
+ return get_net(net);
+}
+EXPORT_SYMBOL_GPL(rust_helper_get_net);
+
+void rust_helper_put_net(struct net *net)
+{
+ return put_net(net);
+}
+EXPORT_SYMBOL_GPL(rust_helper_put_net);
+
+unsigned int rust_helper_NF_QUEUE_NR(unsigned int n)
+{
+ return NF_QUEUE_NR(n);
+}
+EXPORT_SYMBOL_GPL(rust_helper_NF_QUEUE_NR);
+
+void rust_helper___INIT_WORK_WITH_KEY(struct work_struct *work,
+ work_func_t func, bool on_stack, struct lock_class_key *key)
+{
+ __INIT_WORK_WITH_KEY(work, func, on_stack, key);
+}
+EXPORT_SYMBOL_GPL(rust_helper___INIT_WORK_WITH_KEY);
+
+struct dentry *rust_helper_dget(struct dentry *dentry)
+{
+ return dget(dentry);
+}
+EXPORT_SYMBOL_GPL(rust_helper_dget);
+
+void rust_helper_lockdep_register_key(struct lock_class_key *key)
+{
+ lockdep_register_key(key);
+}
+EXPORT_SYMBOL_GPL(rust_helper_lockdep_register_key);
+
+void rust_helper_lockdep_unregister_key(struct lock_class_key *key)
+{
+ lockdep_unregister_key(key);
+}
+EXPORT_SYMBOL_GPL(rust_helper_lockdep_unregister_key);
+
+int rust_helper_fs_parse(struct fs_context *fc,
+ const struct fs_parameter_spec *desc,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
+{
+ return fs_parse(fc, desc, param, result);
+}
+EXPORT_SYMBOL_GPL(rust_helper_fs_parse);
+
+/*
+ * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
+ * as the Rust `usize` type, so we can use it in contexts where Rust
+ * expects a `usize` like slice (array) indices. `usize` is defined to be
+ * the same as C's `uintptr_t` type (can hold any pointer) but not
+ * necessarily the same as `size_t` (can hold the size of any single
+ * object). Most modern platforms use the same concrete integer type for
+ * both of them, but in case we find ourselves on a platform where
+ * that's not true, fail early instead of risking ABI or
+ * integer-overflow issues.
+ *
+ * If your platform fails this assertion, it means that you are in
+ * danger of integer-overflow bugs (even if you attempt to remove
+ * `--size_t-is-usize`). It may be easiest to change the kernel ABI on
+ * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
+ * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
+ */
+static_assert(
+ sizeof(size_t) == sizeof(uintptr_t) &&
+ __alignof__(size_t) == __alignof__(uintptr_t),
+ "Rust code expects C `size_t` to match Rust `usize`"
+);
diff --git a/rust/kernel/allocator.rs b/rust/kernel/allocator.rs
new file mode 100644
index 000000000000..397a3dd57a9b
--- /dev/null
+++ b/rust/kernel/allocator.rs
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Allocator support.
+
+use core::alloc::{GlobalAlloc, Layout};
+use core::ptr;
+
+use crate::bindings;
+
+struct KernelAllocator;
+
+unsafe impl GlobalAlloc for KernelAllocator {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ // `krealloc()` is used instead of `kmalloc()` because the latter is
+ // an inline function and cannot be bound to as a result.
+ unsafe { bindings::krealloc(ptr::null(), layout.size(), bindings::GFP_KERNEL) as *mut u8 }
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ unsafe {
+ bindings::kfree(ptr as *const core::ffi::c_void);
+ }
+ }
+}
+
+#[global_allocator]
+static ALLOCATOR: KernelAllocator = KernelAllocator;
+
+// `rustc` only generates these for some crate types. Even then, we would need
+// to extract the object file that has them from the archive. For the moment,
+// let's generate them ourselves instead.
+//
+// Note that nowadays `#[no_mangle]` also implies that the symbol is exported.
+#[no_mangle]
+fn __rust_alloc(size: usize, _align: usize) -> *mut u8 {
+ unsafe { bindings::krealloc(core::ptr::null(), size, bindings::GFP_KERNEL) as *mut u8 }
+}
+
+#[no_mangle]
+fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) {
+ unsafe { bindings::kfree(ptr as *const core::ffi::c_void) };
+}
+
+#[no_mangle]
+fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize) -> *mut u8 {
+ unsafe {
+ bindings::krealloc(
+ ptr as *const core::ffi::c_void,
+ new_size,
+ bindings::GFP_KERNEL,
+ ) as *mut u8
+ }
+}
+
+#[no_mangle]
+fn __rust_alloc_zeroed(size: usize, _align: usize) -> *mut u8 {
+ unsafe {
+ bindings::krealloc(
+ core::ptr::null(),
+ size,
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO,
+ ) as *mut u8
+ }
+}
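With `KernelAllocator` installed as the global allocator, the ordinary `alloc` types end up in `krealloc()`/`kfree()` underneath. A minimal sketch using the fallible APIs kernel code is expected to prefer:

    use alloc::{collections::TryReserveError, vec::Vec};

    fn demo() -> Result<(), TryReserveError> {
        let mut v: Vec<u32> = Vec::new();
        // Growing the vector goes through the global allocator plumbing above,
        // i.e. ends up in `krealloc(..., GFP_KERNEL)`.
        v.try_reserve(16)?;
        // Dropping it goes through `dealloc`, i.e. `kfree()`.
        drop(v);
        Ok(())
    }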
diff --git a/rust/kernel/amba.rs b/rust/kernel/amba.rs
new file mode 100644
index 000000000000..ec8808124a29
--- /dev/null
+++ b/rust/kernel/amba.rs
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Amba devices and drivers.
+//!
+//! C header: [`include/linux/amba/bus.h`](../../../../include/linux/amba/bus.h)
+
+use crate::{
+ bindings, device, driver, error::from_kernel_result, io_mem::Resource, power, str::CStr,
+ to_result, types::PointerWrapper, Result, ThisModule,
+};
+
+/// A registration of an amba driver.
+pub type Registration<T> = driver::Registration<Adapter<T>>;
+
+/// Id of an Amba device.
+#[derive(Clone, Copy)]
+pub struct DeviceId {
+ /// Device id.
+ pub id: u32,
+
+ /// Mask that identifies which bits are valid in the device id.
+ pub mask: u32,
+}
+
+// SAFETY: `ZERO` is all zeroed-out and `to_rawid` stores `offset` in `amba_id::data`.
+unsafe impl const driver::RawDeviceId for DeviceId {
+ type RawType = bindings::amba_id;
+ const ZERO: Self::RawType = bindings::amba_id {
+ id: 0,
+ mask: 0,
+ data: core::ptr::null_mut(),
+ };
+
+ fn to_rawid(&self, offset: isize) -> Self::RawType {
+ bindings::amba_id {
+ id: self.id,
+ mask: self.mask,
+ data: offset as _,
+ }
+ }
+}
+
+/// An amba driver.
+pub trait Driver {
+ /// Data stored on device by driver.
+ type Data: PointerWrapper + Send + Sync + driver::DeviceRemoval = ();
+
+ /// The type that implements the power-management operations.
+ ///
+ /// The default is a type that implements no power-management operations. Drivers that do
+ /// implement them need to specify the type (commonly [`Self`]).
+ type PowerOps: power::Operations<Data = Self::Data> = power::NoOperations<Self::Data>;
+
+ /// The type holding information about each device id supported by the driver.
+ type IdInfo: 'static = ();
+
+ /// The table of device ids supported by the driver.
+ const ID_TABLE: Option<driver::IdTable<'static, DeviceId, Self::IdInfo>> = None;
+
+ /// Probes for the device with the given id.
+ fn probe(dev: &mut Device, id_info: Option<&Self::IdInfo>) -> Result<Self::Data>;
+
+ /// Cleans up any resources associated with the device.
+ ///
+ /// This is called when the driver is detached from the device.
+ fn remove(_data: &Self::Data) {}
+}
+
+/// An adapter for the registration of Amba drivers.
+pub struct Adapter<T: Driver>(T);
+
+impl<T: Driver> driver::DriverOps for Adapter<T> {
+ type RegType = bindings::amba_driver;
+
+ unsafe fn register(
+ reg: *mut bindings::amba_driver,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: By the safety requirements of this function (defined in the trait definition),
+ // `reg` is non-null and valid.
+ let amba = unsafe { &mut *reg };
+ amba.drv.name = name.as_char_ptr();
+ amba.drv.owner = module.0;
+ amba.probe = Some(probe_callback::<T>);
+ amba.remove = Some(remove_callback::<T>);
+ if let Some(t) = T::ID_TABLE {
+ amba.id_table = t.as_ref();
+ }
+ if cfg!(CONFIG_PM) {
+ // SAFETY: `probe_callback` sets the driver data after calling `T::Data::into_pointer`,
+ // and we guarantee that `T::Data` is the same as `T::PowerOps::Data` by a constraint
+ // in the type declaration.
+ amba.drv.pm = unsafe { power::OpsTable::<T::PowerOps>::build() };
+ }
+ // SAFETY: By the safety requirements of this function, `reg` is valid and fully
+ // initialised.
+ to_result(unsafe { bindings::amba_driver_register(reg) })
+ }
+
+ unsafe fn unregister(reg: *mut bindings::amba_driver) {
+ // SAFETY: By the safety requirements of this function (defined in the trait definition),
+ // `reg` was passed (and updated) by a previous successful call to `amba_driver_register`.
+ unsafe { bindings::amba_driver_unregister(reg) };
+ }
+}
+
+unsafe extern "C" fn probe_callback<T: Driver>(
+ adev: *mut bindings::amba_device,
+ aid: *const bindings::amba_id,
+) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `adev` is valid by the contract with the C code. `dev` is alive only for the
+ // duration of this call, so `adev` is guaranteed to remain alive for the lifetime of `dev`.
+ let mut dev = unsafe { Device::from_ptr(adev) };
+ // SAFETY: `aid` is valid by the contract with the C code.
+ let offset = unsafe { (*aid).data };
+ let info = if offset.is_null() {
+ None
+ } else {
+ // SAFETY: The offset comes from a previous call to `offset_from` in `IdArray::new`,
+ // which guarantees that the resulting pointer is within the table.
+ let ptr = unsafe {
+ aid.cast::<u8>()
+ .offset(offset as _)
+ .cast::<Option<T::IdInfo>>()
+ };
+ // SAFETY: The id table has a static lifetime, so `ptr` is guaranteed to be valid for
+ // read.
+ unsafe { (&*ptr).as_ref() }
+ };
+ let data = T::probe(&mut dev, info)?;
+ let ptr = T::Data::into_pointer(data);
+ // SAFETY: `adev` is valid for write by the contract with the C code.
+ unsafe { bindings::amba_set_drvdata(adev, ptr as _) };
+ Ok(0)
+ }
+}
+
+unsafe extern "C" fn remove_callback<T: Driver>(adev: *mut bindings::amba_device) {
+ // SAFETY: `adev` is valid by the contract with the C code.
+ let ptr = unsafe { bindings::amba_get_drvdata(adev) };
+ // SAFETY: The value returned by `amba_get_drvdata` was stored by a previous call to
+ // `amba_set_drvdata` in `probe_callback` above; the value comes from a call to
+ // `T::Data::into_pointer`.
+ let data = unsafe { T::Data::from_pointer(ptr) };
+ T::remove(&data);
+ <T::Data as driver::DeviceRemoval>::device_remove(&data);
+}
+
+/// An Amba device.
+///
+/// # Invariants
+///
+/// The field `ptr` is non-null and valid for the lifetime of the object.
+pub struct Device {
+ ptr: *mut bindings::amba_device,
+ res: Option<Resource>,
+}
+
+impl Device {
+ /// Creates a new device from the given pointer.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be non-null and valid. It must remain valid for the lifetime of the returned
+ /// instance.
+ unsafe fn from_ptr(ptr: *mut bindings::amba_device) -> Self {
+ // SAFETY: The safety requirements of the function ensure that `ptr` is valid.
+ let dev = unsafe { &mut *ptr };
+ // INVARIANT: The safety requirements of the function ensure the lifetime invariant.
+ Self {
+ ptr,
+ res: Resource::new(dev.res.start, dev.res.end),
+ }
+ }
+
+ /// Returns the io mem resource associated with the device, if there is one.
+ ///
+ /// Ownership of the resource is transferred to the caller, so subsequent calls to this
+ /// function will return [`None`].
+ pub fn take_resource(&mut self) -> Option<Resource> {
+ self.res.take()
+ }
+
+ /// Returns the index-th irq associated with the device, if one exists.
+ pub fn irq(&self, index: usize) -> Option<u32> {
+ // SAFETY: By the type invariants, `self.ptr` is valid for read.
+ let dev = unsafe { &*self.ptr };
+ if index >= dev.irq.len() || dev.irq[index] == 0 {
+ None
+ } else {
+ Some(dev.irq[index])
+ }
+ }
+}
+
+// SAFETY: The device returned by `raw_device` is the raw Amba device.
+unsafe impl device::RawDevice for Device {
+ fn raw_device(&self) -> *mut bindings::device {
+ // SAFETY: By the type invariants, we know that `self.ptr` is non-null and valid.
+ unsafe { &mut (*self.ptr).dev }
+ }
+}
+
+/// Declares a kernel module that exposes a single amba driver.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::{amba, define_amba_id_table, module_amba_driver};
+/// #
+/// struct MyDriver;
+/// impl amba::Driver for MyDriver {
+/// // [...]
+/// # fn probe(_dev: &mut amba::Device, _id: Option<&Self::IdInfo>) -> Result {
+/// # Ok(())
+/// # }
+/// # define_amba_id_table! {(), [
+/// # ({ id: 0x00041061, mask: 0x000fffff }, None),
+/// # ]}
+/// }
+///
+/// module_amba_driver! {
+/// type: MyDriver,
+/// name: b"module_name",
+/// author: b"Author name",
+/// license: b"GPL",
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_amba_driver {
+ ($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::amba::Adapter<T>, { $($f)* });
+ };
+}
+
+/// Defines the id table for amba devices.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{amba, define_amba_id_table};
+/// #
+/// # struct Sample;
+/// # impl kernel::amba::Driver for Sample {
+/// # fn probe(_dev: &mut amba::Device, _id: Option<&Self::IdInfo>) -> Result {
+/// # Ok(())
+/// # }
+/// define_amba_id_table! {(), [
+/// ({ id: 0x00041061, mask: 0x000fffff }, None),
+/// ]}
+/// # }
+/// ```
+#[macro_export]
+macro_rules! define_amba_id_table {
+ ($data_type:ty, $($t:tt)*) => {
+ type IdInfo = $data_type;
+ $crate::define_id_table!(ID_TABLE, $crate::amba::DeviceId, $data_type, $($t)*);
+ };
+}
diff --git a/rust/kernel/build_assert.rs b/rust/kernel/build_assert.rs
new file mode 100644
index 000000000000..72c533d8058d
--- /dev/null
+++ b/rust/kernel/build_assert.rs
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Build-time assert.
+
+/// Fails the build if the code path calling `build_error!` can possibly be executed.
+///
+/// If the macro is executed in const context, `build_error!` will panic.
+/// If the compiler or optimizer cannot guarantee that `build_error!` can never
+/// be called, a build error will be triggered.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::build_error;
+/// #[inline]
+/// fn foo(a: usize) -> usize {
+/// a.checked_add(1).unwrap_or_else(|| build_error!("overflow"))
+/// }
+///
+/// assert_eq!(foo(usize::MAX - 1), usize::MAX); // OK.
+/// // foo(usize::MAX); // Fails to compile.
+/// ```
+#[macro_export]
+macro_rules! build_error {
+ () => {{
+ $crate::build_error("")
+ }};
+ ($msg:expr) => {{
+ $crate::build_error($msg)
+ }};
+}
+
+/// Asserts that a boolean expression is `true` at compile time.
+///
+/// If the condition is evaluated to `false` in const context, `build_assert!`
+/// will panic. If the compiler or optimizer cannot guarantee the condition will
+/// be evaluated to `true`, a build error will be triggered.
+///
+/// [`static_assert!`] should be preferred to `build_assert!` whenever possible.
+///
+/// # Examples
+///
+/// These examples show that different types of [`assert!`] will trigger errors
+/// at different stages of compilation. It is preferred to err as early as
+/// possible, so [`static_assert!`] should be used whenever possible.
+// TODO: Could be `compile_fail` when supported.
+/// ```ignore
+/// fn foo() {
+/// static_assert!(1 > 1); // Compile-time error
+/// build_assert!(1 > 1); // Build-time error
+/// assert!(1 > 1); // Run-time error
+/// }
+/// ```
+///
+/// When the condition refers to generic parameters or parameters of an inline function,
+/// [`static_assert!`] cannot be used. Use `build_assert!` in this scenario.
+/// ```
+/// fn foo<const N: usize>() {
+/// // `static_assert!(N > 1);` is not allowed
+/// build_assert!(N > 1); // Build-time check
+/// assert!(N > 1); // Run-time check
+/// }
+///
+/// #[inline]
+/// fn bar(n: usize) {
+/// // `static_assert!(n > 1);` is not allowed
+/// build_assert!(n > 1); // Build-time check
+/// assert!(n > 1); // Run-time check
+/// }
+/// ```
+#[macro_export]
+macro_rules! build_assert {
+ ($cond:expr $(,)?) => {{
+ if !$cond {
+ $crate::build_error(concat!("assertion failed: ", stringify!($cond)));
+ }
+ }};
+ ($cond:expr, $msg:expr) => {{
+ if !$cond {
+ $crate::build_error($msg);
+ }
+ }};
+}
diff --git a/rust/kernel/chrdev.rs b/rust/kernel/chrdev.rs
new file mode 100644
index 000000000000..5b1e083c23b9
--- /dev/null
+++ b/rust/kernel/chrdev.rs
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Character devices.
+//!
+//! Also called "char devices", `chrdev`, `cdev`.
+//!
+//! C header: [`include/linux/cdev.h`](../../../../include/linux/cdev.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#char-devices>
+
+use alloc::boxed::Box;
+use core::convert::TryInto;
+use core::marker::PhantomPinned;
+use core::pin::Pin;
+
+use crate::bindings;
+use crate::error::{code::*, Error, Result};
+use crate::file;
+use crate::str::CStr;
+
+/// Character device.
+///
+/// # Invariants
+///
+/// - [`self.0`] is valid and non-null.
+/// - [`(*self.0).ops`] is valid, non-null and has static lifetime.
+/// - [`(*self.0).owner`] is valid and, if non-null, has module lifetime.
+struct Cdev(*mut bindings::cdev);
+
+impl Cdev {
+ fn alloc(
+ fops: &'static bindings::file_operations,
+ module: &'static crate::ThisModule,
+ ) -> Result<Self> {
+ // SAFETY: FFI call.
+ let cdev = unsafe { bindings::cdev_alloc() };
+ if cdev.is_null() {
+ return Err(ENOMEM);
+ }
+ // SAFETY: `cdev` is valid and non-null since `cdev_alloc()`
+ // returned a valid pointer which was null-checked.
+ unsafe {
+ (*cdev).ops = fops;
+ (*cdev).owner = module.0;
+ }
+ // INVARIANTS:
+ // - [`self.0`] is valid and non-null.
+ // - [`(*self.0).ops`] is valid, non-null and has static lifetime,
+ // because it was coerced from a reference with static lifetime.
+ // - [`(*self.0).owner`] is valid and, if non-null, has module lifetime,
+ // guaranteed by the [`ThisModule`] invariant.
+ Ok(Self(cdev))
+ }
+
+ fn add(&mut self, dev: bindings::dev_t, count: core::ffi::c_uint) -> Result {
+ // SAFETY: According to the type invariants:
+ // - [`self.0`] can be safely passed to [`bindings::cdev_add`].
+ // - [`(*self.0).ops`] will live at least as long as [`self.0`].
+ // - [`(*self.0).owner`] will live at least as long as the
+ // module, which is an implicit requirement.
+ let rc = unsafe { bindings::cdev_add(self.0, dev, count) };
+ if rc != 0 {
+ return Err(Error::from_kernel_errno(rc));
+ }
+ Ok(())
+ }
+}
+
+impl Drop for Cdev {
+ fn drop(&mut self) {
+ // SAFETY: [`self.0`] is valid and non-null by the type invariants.
+ unsafe {
+ bindings::cdev_del(self.0);
+ }
+ }
+}
+
+struct RegistrationInner<const N: usize> {
+ dev: bindings::dev_t,
+ used: usize,
+ cdevs: [Option<Cdev>; N],
+ _pin: PhantomPinned,
+}
+
+/// Character device registration.
+///
+/// May contain up to a fixed number (`N`) of devices. Must be pinned.
+pub struct Registration<const N: usize> {
+ name: &'static CStr,
+ minors_start: u16,
+ this_module: &'static crate::ThisModule,
+ inner: Option<RegistrationInner<N>>,
+}
+
+impl<const N: usize> Registration<{ N }> {
+ /// Creates a [`Registration`] object for a character device.
+ ///
+ /// This does *not* register the device: see [`Self::register()`].
+ ///
+ /// This associated function is intended to be used when you need to avoid
+ /// a memory allocation, e.g. when the [`Registration`] is a member of
+ /// a bigger structure inside your [`crate::Module`] instance. If you
+ /// are going to pin the registration right away, call
+ /// [`Self::new_pinned()`] instead.
+ pub fn new(
+ name: &'static CStr,
+ minors_start: u16,
+ this_module: &'static crate::ThisModule,
+ ) -> Self {
+ Registration {
+ name,
+ minors_start,
+ this_module,
+ inner: None,
+ }
+ }
+
+ /// Creates a pinned [`Registration`] object for a character device.
+ ///
+ /// This does *not* register the device: see [`Self::register()`].
+ pub fn new_pinned(
+ name: &'static CStr,
+ minors_start: u16,
+ this_module: &'static crate::ThisModule,
+ ) -> Result<Pin<Box<Self>>> {
+ Ok(Pin::from(Box::try_new(Self::new(
+ name,
+ minors_start,
+ this_module,
+ ))?))
+ }
+
+ /// Registers a character device.
+ ///
+ /// You may call this once per device type, up to `N` times.
+ pub fn register<T: file::Operations<OpenData = ()>>(self: Pin<&mut Self>) -> Result {
+ // SAFETY: We must ensure that we never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ if this.inner.is_none() {
+ let mut dev: bindings::dev_t = 0;
+ // SAFETY: Calling unsafe function. `this.name` has `'static`
+ // lifetime.
+ let res = unsafe {
+ bindings::alloc_chrdev_region(
+ &mut dev,
+ this.minors_start.into(),
+ N.try_into()?,
+ this.name.as_char_ptr(),
+ )
+ };
+ if res != 0 {
+ return Err(Error::from_kernel_errno(res));
+ }
+ const NONE: Option<Cdev> = None;
+ this.inner = Some(RegistrationInner {
+ dev,
+ used: 0,
+ cdevs: [NONE; N],
+ _pin: PhantomPinned,
+ });
+ }
+
+ let mut inner = this.inner.as_mut().unwrap();
+ if inner.used == N {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: The adapter doesn't retrieve any state yet, so it's compatible with any
+ // registration.
+ let fops = unsafe { file::OperationsVtable::<Self, T>::build() };
+ let mut cdev = Cdev::alloc(fops, this.this_module)?;
+ cdev.add(inner.dev + inner.used as bindings::dev_t, 1)?;
+ inner.cdevs[inner.used].replace(cdev);
+ inner.used += 1;
+ Ok(())
+ }
+}
+
+impl<const N: usize> file::OpenAdapter<()> for Registration<{ N }> {
+ unsafe fn convert(_inode: *mut bindings::inode, _file: *mut bindings::file) -> *const () {
+ // TODO: Update the SAFETY comment on the call to `OperationsVtable::build` above once
+ // this is updated to retrieve state.
+ &()
+ }
+}
+
+// SAFETY: `Registration` does not expose any of its state across threads
+// (it is fine for multiple threads to have a shared reference to it).
+unsafe impl<const N: usize> Sync for Registration<{ N }> {}
+
+impl<const N: usize> Drop for Registration<{ N }> {
+ fn drop(&mut self) {
+ if let Some(inner) = self.inner.as_mut() {
+ // Replicate kernel C behaviour: drop [`Cdev`]s before calling
+ // [`bindings::unregister_chrdev_region`].
+ for i in 0..inner.used {
+ inner.cdevs[i].take();
+ }
+ // SAFETY: [`self.inner`] is Some, so [`inner.dev`] was previously
+ // created using [`bindings::alloc_chrdev_region`].
+ unsafe {
+ bindings::unregister_chrdev_region(inner.dev, N.try_into().unwrap());
+ }
+ }
+ }
+}
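A sketch of how a module could use this registration, mirroring the in-tree character-device sample; `RustFile` stands for some `file::Operations` implementation with `OpenData = ()` (not shown), and prelude imports are elided:

    struct RustChrdev {
        _dev: Pin<Box<chrdev::Registration<2>>>,
    }

    impl kernel::Module for RustChrdev {
        fn init(name: &'static CStr, module: &'static ThisModule) -> Result<Self> {
            let mut reg = chrdev::Registration::new_pinned(name, 0, module)?;
            // Up to `N` (here 2) character devices may be registered on this region.
            reg.as_mut().register::<RustFile>()?;
            reg.as_mut().register::<RustFile>()?;
            Ok(Self { _dev: reg })
        }
    }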
diff --git a/rust/kernel/clk.rs b/rust/kernel/clk.rs
new file mode 100644
index 000000000000..1ec478d96abc
--- /dev/null
+++ b/rust/kernel/clk.rs
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Common clock framework.
+//!
+//! C header: [`include/linux/clk.h`](../../../../include/linux/clk.h)
+
+use crate::{bindings, error::Result, to_result};
+use core::mem::ManuallyDrop;
+
+/// Represents `struct clk *`.
+///
+/// # Invariants
+///
+/// The pointer is valid.
+pub struct Clk(*mut bindings::clk);
+
+impl Clk {
+ /// Creates new clock structure from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid.
+ pub unsafe fn new(clk: *mut bindings::clk) -> Self {
+ Self(clk)
+ }
+
+ /// Returns value of the rate field of `struct clk`.
+ pub fn get_rate(&self) -> usize {
+ // SAFETY: The pointer is valid by the type invariant.
+ unsafe { bindings::clk_get_rate(self.0) as usize }
+ }
+
+ /// Prepares and enables the underlying hardware clock.
+ ///
+ /// This function should not be called in atomic context.
+ pub fn prepare_enable(self) -> Result<EnabledClk> {
+ // SAFETY: The pointer is valid by the type invariant.
+ to_result(unsafe { bindings::clk_prepare_enable(self.0) })?;
+ Ok(EnabledClk(self))
+ }
+}
+
+impl Drop for Clk {
+ fn drop(&mut self) {
+ // SAFETY: The pointer is valid by the type invariant.
+ unsafe { bindings::clk_put(self.0) };
+ }
+}
+
+// SAFETY: `Clk` is not restricted to a single thread so it is safe
+// to move it between threads.
+unsafe impl Send for Clk {}
+
+/// A clock variant that is prepared and enabled.
+pub struct EnabledClk(Clk);
+
+impl EnabledClk {
+ /// Returns value of the rate field of `struct clk`.
+ pub fn get_rate(&self) -> usize {
+ self.0.get_rate()
+ }
+
+ /// Disables and unprepares the underlying hardware clock immediately, instead of waiting
+ /// for this value to be dropped.
+ ///
+ /// This function should not be called in atomic context.
+ pub fn disable_unprepare(self) -> Clk {
+ let mut clk = ManuallyDrop::new(self);
+ // SAFETY: The pointer is valid by the type invariant.
+ unsafe { bindings::clk_disable_unprepare(clk.0 .0) };
+ core::mem::replace(&mut clk.0, Clk(core::ptr::null_mut()))
+ }
+}
+
+impl Drop for EnabledClk {
+ fn drop(&mut self) {
+ // SAFETY: The pointer is valid by the type invariant.
+ unsafe { bindings::clk_disable_unprepare(self.0 .0) };
+ }
+}
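A brief usage sketch; `raw` stands for a valid `struct clk *` obtained elsewhere, which is an assumption of this example:

    // SAFETY: `raw` is assumed to be a valid `struct clk *`.
    let clk = unsafe { Clk::new(raw) };
    // `clk_prepare_enable()`; the clock stays enabled while `enabled` is alive.
    let enabled = clk.prepare_enable()?;
    let _rate = enabled.get_rate();
    // Disable early instead of waiting for the `EnabledClk` to be dropped.
    let clk = enabled.disable_unprepare();
    // Dropping the `Clk` finally calls `clk_put()`.
    drop(clk);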
diff --git a/rust/kernel/cred.rs b/rust/kernel/cred.rs
new file mode 100644
index 000000000000..beacc71d92ac
--- /dev/null
+++ b/rust/kernel/cred.rs
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Credentials management.
+//!
+//! C header: [`include/linux/cred.h`](../../../../include/linux/cred.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/security/credentials.html>
+
+use crate::{bindings, AlwaysRefCounted};
+use core::cell::UnsafeCell;
+
+/// Wraps the kernel's `struct cred`.
+///
+/// # Invariants
+///
+/// Instances of this type are always ref-counted, that is, a call to `get_cred` ensures that the
+/// allocation remains valid at least until the matching call to `put_cred`.
+#[repr(transparent)]
+pub struct Credential(pub(crate) UnsafeCell<bindings::cred>);
+
+impl Credential {
+ /// Creates a reference to a [`Credential`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
+ /// returned [`Credential`] reference.
+ pub(crate) unsafe fn from_ptr<'a>(ptr: *const bindings::cred) -> &'a Self {
+ // SAFETY: The safety requirements guarantee the validity of the dereference, while the
+ // `Credential` type being transparent makes the cast ok.
+ unsafe { &*ptr.cast() }
+ }
+}
+
+// SAFETY: The type invariants guarantee that `Credential` is always ref-counted.
+unsafe impl AlwaysRefCounted for Credential {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_cred(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::put_cred(obj.cast().as_ptr()) };
+ }
+}
diff --git a/rust/kernel/delay.rs b/rust/kernel/delay.rs
new file mode 100644
index 000000000000..1e987fa65941
--- /dev/null
+++ b/rust/kernel/delay.rs
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Delay functions for operations like sleeping.
+//!
+//! C header: [`include/linux/delay.h`](../../../../include/linux/delay.h)
+
+use crate::bindings;
+use core::{cmp::min, time::Duration};
+
+const MILLIS_PER_SEC: u64 = 1_000;
+
+fn coarse_sleep_conversion(duration: Duration) -> core::ffi::c_uint {
+ let milli_as_nanos = Duration::MILLISECOND.subsec_nanos();
+
+ // Rounds the nanosecond component of `duration` up to the nearest millisecond.
+ let nanos_as_millis = duration.subsec_nanos().wrapping_add(milli_as_nanos - 1) / milli_as_nanos;
+
+ // Saturates the second component of `duration` to `c_uint::MAX`.
+ let seconds_as_millis = min(
+ duration.as_secs().saturating_mul(MILLIS_PER_SEC),
+ u64::from(core::ffi::c_uint::MAX),
+ ) as core::ffi::c_uint;
+
+ seconds_as_millis.saturating_add(nanos_as_millis)
+}
+
+/// Sleeps safely even with waitqueue interruptions.
+///
+/// This function forwards the call to the C side `msleep` function. As a result,
+/// `duration` will be rounded up to the nearest millisecond if it has
+/// sub-millisecond granularity. Any [`Duration`] that exceeds
+/// [`c_uint::MAX`][core::ffi::c_uint::MAX] in milliseconds is saturated.
+///
+/// # Examples
+///
+// Keep these in sync with `test_coarse_sleep_examples`.
+/// ```
+/// # use core::time::Duration;
+/// # use kernel::delay::coarse_sleep;
+/// coarse_sleep(Duration::ZERO); // Equivalent to `msleep(0)`.
+/// coarse_sleep(Duration::from_nanos(1)); // Equivalent to `msleep(1)`.
+///
+/// coarse_sleep(Duration::from_nanos(1_000_000)); // Equivalent to `msleep(1)`.
+/// coarse_sleep(Duration::from_nanos(1_000_001)); // Equivalent to `msleep(2)`.
+/// coarse_sleep(Duration::from_nanos(1_999_999)); // Equivalent to `msleep(2)`.
+///
+/// coarse_sleep(Duration::from_millis(1)); // Equivalent to `msleep(1)`.
+/// coarse_sleep(Duration::from_millis(2)); // Equivalent to `msleep(2)`.
+///
+/// coarse_sleep(Duration::from_secs(1)); // Equivalent to `msleep(1000)`.
+/// coarse_sleep(Duration::new(1, 1)); // Equivalent to `msleep(1001)`.
+/// coarse_sleep(Duration::new(1, 2)); // Equivalent to `msleep(1001)`.
+/// ```
+pub fn coarse_sleep(duration: Duration) {
+ // SAFETY: `msleep` is safe for all values of its argument.
+ unsafe { bindings::msleep(coarse_sleep_conversion(duration)) }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{coarse_sleep_conversion, MILLIS_PER_SEC};
+ use core::time::Duration;
+
+ #[test]
+ fn test_coarse_sleep_examples() {
+ // Keep these in sync with `coarse_sleep`'s `# Examples` section.
+
+ assert_eq!(coarse_sleep_conversion(Duration::ZERO), 0);
+ assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1)), 1);
+
+ assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_000_000)), 1);
+ assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_000_001)), 2);
+ assert_eq!(coarse_sleep_conversion(Duration::from_nanos(1_999_999)), 2);
+
+ assert_eq!(coarse_sleep_conversion(Duration::from_millis(1)), 1);
+ assert_eq!(coarse_sleep_conversion(Duration::from_millis(2)), 2);
+
+ assert_eq!(coarse_sleep_conversion(Duration::from_secs(1)), 1000);
+ assert_eq!(coarse_sleep_conversion(Duration::new(1, 1)), 1001);
+ assert_eq!(coarse_sleep_conversion(Duration::new(1, 2)), 1001);
+ }
+
+ #[test]
+ fn test_coarse_sleep_saturation() {
+ assert!(
+ coarse_sleep_conversion(Duration::new(
+ core::ffi::c_uint::MAX as u64 / MILLIS_PER_SEC,
+ 0
+ )) < core::ffi::c_uint::MAX
+ );
+ assert_eq!(
+ coarse_sleep_conversion(Duration::new(
+ core::ffi::c_uint::MAX as u64 / MILLIS_PER_SEC,
+ 999_999_999
+ )),
+ core::ffi::c_uint::MAX
+ );
+
+ assert_eq!(
+ coarse_sleep_conversion(Duration::MAX),
+ core::ffi::c_uint::MAX
+ );
+ }
+}
diff --git a/rust/kernel/device.rs b/rust/kernel/device.rs
new file mode 100644
index 000000000000..c37f555c534d
--- /dev/null
+++ b/rust/kernel/device.rs
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic devices that are part of the kernel's driver model.
+//!
+//! C header: [`include/linux/device.h`](../../../../include/linux/device.h)
+
+#[cfg(CONFIG_COMMON_CLK)]
+use crate::{clk::Clk, error::from_kernel_err_ptr};
+
+use crate::{
+ bindings,
+ revocable::{Revocable, RevocableGuard},
+ str::CStr,
+ sync::{LockClassKey, NeedsLockClass, RevocableMutex, RevocableMutexGuard, UniqueRef},
+ Result,
+};
+use core::{
+ fmt,
+ ops::{Deref, DerefMut},
+ pin::Pin,
+};
+
+#[cfg(CONFIG_PRINTK)]
+use crate::c_str;
+
+/// A raw device.
+///
+/// # Safety
+///
+/// Implementers must ensure that the `*mut device` returned by [`RawDevice::raw_device`] is
+/// related to `self`, that is, actions on it will affect `self`. For example, if one calls
+/// `get_device`, then the refcount on the device represented by `self` will be incremented.
+///
+/// Additionally, implementers must ensure that the device is never renamed. Commit a5462516aa99
+/// ("driver-core: document restrictions on device_rename()") has details on why `device_rename`
+/// should not be used.
+pub unsafe trait RawDevice {
+ /// Returns the raw `struct device` related to `self`.
+ fn raw_device(&self) -> *mut bindings::device;
+
+ /// Returns the name of the device.
+ fn name(&self) -> &CStr {
+ let ptr = self.raw_device();
+
+ // SAFETY: `ptr` is valid because `self` keeps it alive.
+ let name = unsafe { bindings::dev_name(ptr) };
+
+ // SAFETY: The name of the device remains valid while it is alive (because the device is
+ // never renamed, per the safety requirement of this trait). This is guaranteed to be the
+ // case because the reference to `self` outlives the one of the returned `CStr` (enforced
+ // by the compiler because of their lifetimes).
+ unsafe { CStr::from_char_ptr(name) }
+ }
+
+ /// Looks up a clock producer consumed by this device.
+ ///
+ /// Returns a managed reference to the clock producer.
+ #[cfg(CONFIG_COMMON_CLK)]
+ fn clk_get(&self, id: Option<&CStr>) -> Result<Clk> {
+ let id_ptr = match id {
+ Some(cstr) => cstr.as_char_ptr(),
+ None => core::ptr::null(),
+ };
+
+ // SAFETY: `id_ptr` is optional and may be either a valid pointer
+ // from the type invariant or NULL otherwise.
+ let clk_ptr = unsafe { from_kernel_err_ptr(bindings::clk_get(self.raw_device(), id_ptr)) }?;
+
+ // SAFETY: Clock is initialized with valid pointer returned from `bindings::clk_get` call.
+ unsafe { Ok(Clk::new(clk_ptr)) }
+ }
+
+ /// Prints an emergency-level message (level 0) prefixed with device information.
+ ///
+ /// More details are available from [`dev_emerg`].
+ fn pr_emerg(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_EMERG, args) };
+ }
+
+ /// Prints an alert-level message (level 1) prefixed with device information.
+ ///
+ /// More details are available from [`dev_alert`].
+ fn pr_alert(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_ALERT, args) };
+ }
+
+ /// Prints a critical-level message (level 2) prefixed with device information.
+ ///
+ /// More details are available from [`dev_crit`].
+ fn pr_crit(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_CRIT, args) };
+ }
+
+ /// Prints an error-level message (level 3) prefixed with device information.
+ ///
+ /// More details are available from [`dev_err`].
+ fn pr_err(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_ERR, args) };
+ }
+
+ /// Prints a warning-level message (level 4) prefixed with device information.
+ ///
+ /// More details are available from [`dev_warn`].
+ fn pr_warn(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_WARNING, args) };
+ }
+
+ /// Prints a notice-level message (level 5) prefixed with device information.
+ ///
+ /// More details are available from [`dev_notice`].
+ fn pr_notice(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_NOTICE, args) };
+ }
+
+ /// Prints an info-level message (level 6) prefixed with device information.
+ ///
+ /// More details are available from [`dev_info`].
+ fn pr_info(&self, args: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_INFO, args) };
+ }
+
+ /// Prints a debug-level message (level 7) prefixed with device information.
+ ///
+ /// More details are available from [`dev_dbg`].
+ fn pr_dbg(&self, args: fmt::Arguments<'_>) {
+ if cfg!(debug_assertions) {
+ // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
+ unsafe { self.printk(bindings::KERN_DEBUG, args) };
+ }
+ }
+
+ /// Prints the provided message to the console.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `klevel` is null-terminated; in particular, it must be one of the
+ /// `KERN_*` constants, for example, `KERN_CRIT`, `KERN_ALERT`, etc.
+ #[cfg_attr(not(CONFIG_PRINTK), allow(unused_variables))]
+ unsafe fn printk(&self, klevel: &[u8], msg: fmt::Arguments<'_>) {
+ // SAFETY: `klevel` is null-terminated and one of the kernel constants. `self.raw_device`
+ // is valid because `self` is valid. The "%pA" format string expects a pointer to
+ // `fmt::Arguments`, which is what we're passing as the last argument.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_dev_printk(
+ klevel as *const _ as *const core::ffi::c_char,
+ self.raw_device(),
+ c_str!("%pA").as_char_ptr(),
+ &msg as *const _ as *const core::ffi::c_void,
+ )
+ };
+ }
+}
+
+/// A ref-counted device.
+///
+/// # Invariants
+///
+/// `ptr` is valid, non-null, and has a non-zero reference count. One of the references is owned by
+/// `self`, and will be decremented when `self` is dropped.
+pub struct Device {
+ pub(crate) ptr: *mut bindings::device,
+}
+
+// SAFETY: `Device` only holds a pointer to a C device, which is safe to be used from any thread.
+unsafe impl Send for Device {}
+
+// SAFETY: `Device` only holds a pointer to a C device, references to which are safe to be used
+// from any thread.
+unsafe impl Sync for Device {}
+
+impl Device {
+ /// Creates a new device instance.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count.
+ pub unsafe fn new(ptr: *mut bindings::device) -> Self {
+ // SAFETY: By the safety requirements, `ptr` is valid and its refcount will be incremented.
+ unsafe { bindings::get_device(ptr) };
+ // INVARIANT: The safety requirements satisfy all but one invariant, which is that `self`
+ // owns a reference. This is satisfied by the call to `get_device` above.
+ Self { ptr }
+ }
+
+ /// Creates a new device instance from an existing [`RawDevice`] instance.
+ pub fn from_dev(dev: &dyn RawDevice) -> Self {
+ // SAFETY: The requirements are satisfied by the existence of `RawDevice` and its safety
+ // requirements.
+ unsafe { Self::new(dev.raw_device()) }
+ }
+}
+
+// SAFETY: The device returned by `raw_device` is the one for which we hold a reference.
+unsafe impl RawDevice for Device {
+ fn raw_device(&self) -> *mut bindings::device {
+ self.ptr
+ }
+}
+
+impl Drop for Device {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
+ // relinquish it now.
+ unsafe { bindings::put_device(self.ptr) };
+ }
+}
+
+/// Device data.
+///
+/// When a device is removed (for whatever reason, for example, because the device was unplugged or
+/// because the user decided to unbind the driver), the driver is given a chance to clean its state
+/// up, and all I/O resources should ideally no longer be used.
+///
+/// However, the device data is reference-counted because other subsystems hold pointers to it. So
+/// some device state must be freed and no longer used, while other state must remain accessible.
+///
+/// This struct separates the device data into three categories:
+/// 1. Registrations: destroyed when the device is removed, but before the I/O resources
+/// become inaccessible.
+/// 2. I/O resources: available until the device is removed.
+/// 3. General data: remains available as long as the reference count is nonzero.
+///
+/// This struct implements the `DeviceRemoval` trait so that resources are cleaned up even if the
+/// device drivers do not do so explicitly.
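+///
+/// # Examples
+///
+/// A sketch of how a driver might group its state into the three categories above (the types
+/// below are hypothetical and only illustrate the split):
+///
+/// ```ignore
+/// # use kernel::device::Data;
+/// struct Registrations; // Revoked first, while I/O resources are still accessible.
+/// struct Resources;     // Revoked once the device is gone.
+/// struct General;       // Available for as long as the reference count is nonzero.
+///
+/// type DriverData = Data<Registrations, Resources, General>;
+/// ```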
+pub struct Data<T, U, V> {
+ registrations: RevocableMutex<T>,
+ resources: Revocable<U>,
+ general: V,
+}
+
+/// Safely creates a new reference-counted instance of [`Data`].
+#[doc(hidden)]
+#[macro_export]
+macro_rules! new_device_data {
+ ($reg:expr, $res:expr, $gen:expr, $name:literal) => {{
+ static CLASS1: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ static CLASS2: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ let regs = $reg;
+ let res = $res;
+ let gen = $gen;
+ let name = $crate::c_str!($name);
+ $crate::device::Data::try_new(regs, res, gen, name, &CLASS1, &CLASS2)
+ }};
+}
+
+impl<T, U, V> Data<T, U, V> {
+ /// Creates a new instance of `Data`.
+ ///
+ /// It is recommended that the [`new_device_data`] macro be used as it automatically creates
+ /// the lock classes.
+ pub fn try_new(
+ registrations: T,
+ resources: U,
+ general: V,
+ name: &'static CStr,
+ key1: &'static LockClassKey,
+ key2: &'static LockClassKey,
+ ) -> Result<Pin<UniqueRef<Self>>> {
+ let mut ret = Pin::from(UniqueRef::try_new(Self {
+ // SAFETY: We call `RevocableMutex::init` below.
+ registrations: unsafe { RevocableMutex::new(registrations) },
+ resources: Revocable::new(resources),
+ general,
+ })?);
+
+ // SAFETY: `Data::registrations` is pinned when `Data` is.
+ let pinned = unsafe { ret.as_mut().map_unchecked_mut(|d| &mut d.registrations) };
+ pinned.init(name, key1, key2);
+ Ok(ret)
+ }
+
+ /// Returns the resources if they're still available.
+ pub fn resources(&self) -> Option<RevocableGuard<'_, U>> {
+ self.resources.try_access()
+ }
+
+ /// Returns the locked registrations if they're still available.
+ pub fn registrations(&self) -> Option<RevocableMutexGuard<'_, T>> {
+ self.registrations.try_write()
+ }
+}
+
+impl<T, U, V> crate::driver::DeviceRemoval for Data<T, U, V> {
+ fn device_remove(&self) {
+ // We revoke the registrations first so that resources are still available to them during
+ // unregistration.
+ self.registrations.revoke();
+
+ // Release resources now. General data remains available.
+ self.resources.revoke();
+ }
+}
+
+impl<T, U, V> Deref for Data<T, U, V> {
+ type Target = V;
+
+ fn deref(&self) -> &V {
+ &self.general
+ }
+}
+
+impl<T, U, V> DerefMut for Data<T, U, V> {
+ fn deref_mut(&mut self) -> &mut V {
+ &mut self.general
+ }
+}
+
+#[doc(hidden)]
+#[macro_export]
+macro_rules! dev_printk {
+ ($method:ident, $dev:expr, $($f:tt)*) => {
+ {
+ // We have an explicit `use` statement here so that callers of this macro are not
+ // required to explicitly use the `RawDevice` trait to use its functions.
+ use $crate::device::RawDevice;
+ ($dev).$method(core::format_args!($($f)*));
+ }
+ }
+}
+
+/// Prints an emergency-level message (level 0) prefixed with device information.
+///
+/// This level should be used if the system is unusable.
+///
+/// Equivalent to the kernel's `dev_emerg` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_emerg!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_emerg {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_emerg, $($f)*); }
+}
+
+/// Prints an alert-level message (level 1) prefixed with device information.
+///
+/// This level should be used if action must be taken immediately.
+///
+/// Equivalent to the kernel's `dev_alert` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_alert!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_alert {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_alert, $($f)*); }
+}
+
+/// Prints a critical-level message (level 2) prefixed with device information.
+///
+/// This level should be used in critical conditions.
+///
+/// Equivalent to the kernel's `dev_crit` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_crit!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_crit {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_crit, $($f)*); }
+}
+
+/// Prints an error-level message (level 3) prefixed with device information.
+///
+/// This level should be used in error conditions.
+///
+/// Equivalent to the kernel's `dev_err` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_err!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_err {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_err, $($f)*); }
+}
+
+/// Prints a warning-level message (level 4) prefixed with device information.
+///
+/// This level should be used in warning conditions.
+///
+/// Equivalent to the kernel's `dev_warn` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_warn!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_warn {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_warn, $($f)*); }
+}
+
+/// Prints a notice-level message (level 5) prefixed with device information.
+///
+/// This level should be used in normal but significant conditions.
+///
+/// Equivalent to the kernel's `dev_notice` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_notice!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_notice {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_notice, $($f)*); }
+}
+
+/// Prints an info-level message (level 6) prefixed with device information.
+///
+/// This level should be used for informational messages.
+///
+/// Equivalent to the kernel's `dev_info` macro.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_info!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_info {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_info, $($f)*); }
+}
+
+/// Prints a debug-level message (level 7) prefixed with device information.
+///
+/// This level should be used for debug messages.
+///
+/// Equivalent to the kernel's `dev_dbg` macro, except that it doesn't support dynamic debug yet.
+///
+/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
+/// [`core::fmt`] and [`alloc::format!`].
+///
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::device::Device;
+///
+/// fn example(dev: &Device) {
+/// dev_dbg!(dev, "hello {}\n", "there");
+/// }
+/// ```
+#[macro_export]
+macro_rules! dev_dbg {
+ ($($f:tt)*) => { $crate::dev_printk!(pr_dbg, $($f)*); }
+}
diff --git a/rust/kernel/driver.rs b/rust/kernel/driver.rs
new file mode 100644
index 000000000000..82b39231e311
--- /dev/null
+++ b/rust/kernel/driver.rs
@@ -0,0 +1,442 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic support for drivers of different buses (e.g., PCI, Platform, Amba, etc.).
+//!
+//! Each bus/subsystem is expected to implement [`DriverOps`], which allows drivers to register
+//! using the [`Registration`] class.
+
+use crate::{error::code::*, str::CStr, sync::Ref, Result, ThisModule};
+use alloc::boxed::Box;
+use core::{cell::UnsafeCell, marker::PhantomData, ops::Deref, pin::Pin};
+
+/// A subsystem (e.g., PCI, Platform, Amba, etc.) that allows drivers to be written for it.
+pub trait DriverOps {
+ /// The type that holds information about the registration. This is typically a struct defined
+ /// by the C portion of the kernel.
+ type RegType: Default;
+
+ /// Registers a driver.
+ ///
+ /// # Safety
+ ///
+ /// `reg` must point to valid, initialised, and writable memory. It may be modified by this
+ /// function to hold registration state.
+ ///
+ /// On success, `reg` must remain pinned and valid until the matching call to
+ /// [`DriverOps::unregister`].
+ unsafe fn register(
+ reg: *mut Self::RegType,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result;
+
+ /// Unregisters a driver previously registered with [`DriverOps::register`].
+ ///
+ /// # Safety
+ ///
+ /// `reg` must point to valid writable memory, initialised by a previous successful call to
+ /// [`DriverOps::register`].
+ unsafe fn unregister(reg: *mut Self::RegType);
+}
+
+/// The registration of a driver.
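+///
+/// # Examples
+///
+/// A sketch of registering a driver from a module's `init` (`MyBus` is a hypothetical type
+/// implementing [`DriverOps`]):
+///
+/// ```ignore
+/// # use kernel::driver::Registration;
+/// # use kernel::{str::CStr, Result, ThisModule};
+/// # use core::pin::Pin;
+/// # use alloc::boxed::Box;
+/// fn init(name: &'static CStr, module: &'static ThisModule) -> Result<Pin<Box<Registration<MyBus>>>> {
+///     Registration::new_pinned(name, module)
+/// }
+/// ```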
+pub struct Registration<T: DriverOps> {
+ is_registered: bool,
+ concrete_reg: UnsafeCell<T::RegType>,
+}
+
+// SAFETY: `Registration` has no fields or methods accessible via `&Registration`, so it is safe to
+// share references to it with multiple threads as nothing can be done.
+unsafe impl<T: DriverOps> Sync for Registration<T> {}
+
+impl<T: DriverOps> Registration<T> {
+ /// Creates a new instance of the registration object.
+ pub fn new() -> Self {
+ Self {
+ is_registered: false,
+ concrete_reg: UnsafeCell::new(T::RegType::default()),
+ }
+ }
+
+ /// Allocates a pinned registration object and registers it.
+ ///
+ /// Returns a pinned heap-allocated representation of the registration.
+ pub fn new_pinned(name: &'static CStr, module: &'static ThisModule) -> Result<Pin<Box<Self>>> {
+ let mut reg = Pin::from(Box::try_new(Self::new())?);
+ reg.as_mut().register(name, module)?;
+ Ok(reg)
+ }
+
+ /// Registers a driver with its subsystem.
+ ///
+ /// It must be pinned because the memory block that represents the registration is potentially
+ /// self-referential.
+ pub fn register(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ if this.is_registered {
+ // Already registered.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: `concrete_reg` was initialised via its default constructor. It is only freed
+ // after `Self::drop` is called, which first calls `T::unregister`.
+ unsafe { T::register(this.concrete_reg.get(), name, module) }?;
+
+ this.is_registered = true;
+ Ok(())
+ }
+}
+
+impl<T: DriverOps> Default for Registration<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T: DriverOps> Drop for Registration<T> {
+ fn drop(&mut self) {
+ if self.is_registered {
+ // SAFETY: This path only runs if a previous call to `T::register` completed
+ // successfully.
+ unsafe { T::unregister(self.concrete_reg.get()) };
+ }
+ }
+}
+
+/// Conversion from a device id to a raw device id.
+///
+/// This is meant to be implemented by buses/subsystems so that they can use [`IdTable`] to
+/// guarantee (at compile-time) zero-termination of device id tables provided by drivers.
+///
+/// # Safety
+///
+/// Implementers must ensure that:
+/// - [`RawDeviceId::ZERO`] is actually a zeroed-out version of the raw device id.
+/// - [`RawDeviceId::to_rawid`] stores `offset` in the context/data field of the raw device id so
+/// that buses can recover the pointer to the data.
+pub unsafe trait RawDeviceId {
+ /// The raw type that holds the device id.
+ ///
+ /// Id tables created from [`Self`] are going to hold this type in its zero-terminated array.
+ type RawType: Copy;
+
+ /// A zeroed-out representation of the raw device id.
+ ///
+ /// Id tables created from [`Self`] use [`Self::ZERO`] as the sentinel to indicate the end of
+ /// the table.
+ const ZERO: Self::RawType;
+
+ /// Converts an id into a raw id.
+ ///
+ /// `offset` is the offset from the memory location where the raw device id is stored to the
+ /// location where its associated context information is stored. Implementations must store
+ /// this in the appropriate context/data field of the raw type.
+ fn to_rawid(&self, offset: isize) -> Self::RawType;
+}
+
+/// A zero-terminated device id array, followed by context data.
+#[repr(C)]
+pub struct IdArray<T: RawDeviceId, U, const N: usize> {
+ ids: [T::RawType; N],
+ sentinel: T::RawType,
+ id_infos: [Option<U>; N],
+}
+
+impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
+ /// Creates a new instance of the array.
+ ///
+ /// The contents are derived from the given identifiers and context information.
+ pub const fn new(ids: [T; N], infos: [Option<U>; N]) -> Self
+ where
+ T: ~const RawDeviceId + Copy,
+ {
+ let mut array = Self {
+ ids: [T::ZERO; N],
+ sentinel: T::ZERO,
+ id_infos: infos,
+ };
+ let mut i = 0usize;
+ while i < N {
+ // SAFETY: Both pointers are within `array` (or one byte beyond), consequently they are
+ // derived from the same allocated object. We are using a `u8` pointer, whose size is 1,
+ // so the pointers are necessarily 1-byte aligned.
+ let offset = unsafe {
+ (&array.id_infos[i] as *const _ as *const u8)
+ .offset_from(&array.ids[i] as *const _ as _)
+ };
+ array.ids[i] = ids[i].to_rawid(offset);
+ i += 1;
+ }
+ array
+ }
+
+ /// Returns an `IdTable` backed by `self`.
+ ///
+ /// This is used to essentially erase the array size.
+ pub const fn as_table(&self) -> IdTable<'_, T, U> {
+ IdTable {
+ first: &self.ids[0],
+ _p: PhantomData,
+ }
+ }
+}
+
+/// A device id table.
+///
+/// The table is guaranteed to be zero-terminated and to be followed by an array of context data of
+/// type `Option<U>`.
+pub struct IdTable<'a, T: RawDeviceId, U> {
+ first: &'a T::RawType,
+ _p: PhantomData<&'a U>,
+}
+
+impl<T: RawDeviceId, U> const AsRef<T::RawType> for IdTable<'_, T, U> {
+ fn as_ref(&self) -> &T::RawType {
+ self.first
+ }
+}
+
+/// Counts the number of parenthesis-delimited, comma-separated items.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::count_paren_items;
+///
+/// assert_eq!(0, count_paren_items!());
+/// assert_eq!(1, count_paren_items!((A)));
+/// assert_eq!(1, count_paren_items!((A),));
+/// assert_eq!(2, count_paren_items!((A), (B)));
+/// assert_eq!(2, count_paren_items!((A), (B),));
+/// assert_eq!(3, count_paren_items!((A), (B), (C)));
+/// assert_eq!(3, count_paren_items!((A), (B), (C),));
+/// ```
+#[macro_export]
+macro_rules! count_paren_items {
+ (($($item:tt)*), $($remaining:tt)*) => { 1 + $crate::count_paren_items!($($remaining)*) };
+ (($($item:tt)*)) => { 1 };
+ () => { 0 };
+}
+
+/// Converts a comma-separated list of pairs into an array containing the first element of each
+/// pair. That is, it discards the second element of the pair.
+///
+/// Additionally, it automatically introduces the given type if the first element is wrapped in
+/// curly braces, for example, if it's `{v: 10}`, it becomes `X { v: 10 }`; this is to avoid
+/// repeating the type.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::first_item;
+///
+/// #[derive(PartialEq, Debug)]
+/// struct X {
+/// v: u32,
+/// }
+///
+/// assert_eq!([] as [X; 0], first_item!(X, ));
+/// assert_eq!([X { v: 10 }], first_item!(X, ({ v: 10 }, Y)));
+/// assert_eq!([X { v: 10 }], first_item!(X, ({ v: 10 }, Y),));
+/// assert_eq!([X { v: 10 }], first_item!(X, (X { v: 10 }, Y)));
+/// assert_eq!([X { v: 10 }], first_item!(X, (X { v: 10 }, Y),));
+/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y)));
+/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y),));
+/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y)));
+/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y),));
+/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
+/// first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y), ({v: 30}, Y)));
+/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
+/// first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y), ({v: 30}, Y),));
+/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
+/// first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y), (X {v: 30}, Y)));
+/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
+/// first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y), (X {v: 30}, Y),));
+/// ```
+#[macro_export]
+macro_rules! first_item {
+ ($id_type:ty, $(({$($first:tt)*}, $second:expr)),* $(,)?) => {
+ {
+ type IdType = $id_type;
+ [$(IdType{$($first)*},)*]
+ }
+ };
+ ($id_type:ty, $(($first:expr, $second:expr)),* $(,)?) => { [$($first,)*] };
+}
+
+/// Converts a comma-separated list of pairs into an array containing the second element of each
+/// pair. That is, it discards the first element of the pair.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::second_item;
+///
+/// assert_eq!([] as [u32; 0], second_item!());
+/// assert_eq!([10u32], second_item!((X, 10u32)));
+/// assert_eq!([10u32], second_item!((X, 10u32),));
+/// assert_eq!([10u32], second_item!(({ X }, 10u32)));
+/// assert_eq!([10u32], second_item!(({ X }, 10u32),));
+/// assert_eq!([10u32, 20], second_item!((X, 10u32), (X, 20)));
+/// assert_eq!([10u32, 20], second_item!((X, 10u32), (X, 20),));
+/// assert_eq!([10u32, 20], second_item!(({ X }, 10u32), ({ X }, 20)));
+/// assert_eq!([10u32, 20], second_item!(({ X }, 10u32), ({ X }, 20),));
+/// assert_eq!([10u32, 20, 30], second_item!((X, 10u32), (X, 20), (X, 30)));
+/// assert_eq!([10u32, 20, 30], second_item!((X, 10u32), (X, 20), (X, 30),));
+/// assert_eq!([10u32, 20, 30], second_item!(({ X }, 10u32), ({ X }, 20), ({ X }, 30)));
+/// assert_eq!([10u32, 20, 30], second_item!(({ X }, 10u32), ({ X }, 20), ({ X }, 30),));
+/// ```
+#[macro_export]
+macro_rules! second_item {
+ ($(({$($first:tt)*}, $second:expr)),* $(,)?) => { [$($second,)*] };
+ ($(($first:expr, $second:expr)),* $(,)?) => { [$($second,)*] };
+}
+
+/// Defines a new constant [`IdArray`] with a concise syntax.
+///
+/// It is meant to be used by buses and subsystems to create a similar macro with their device id
+/// type already specified, i.e., with fewer parameters to the end user.
+///
+/// # Examples
+///
+// TODO: Exported but not usable by kernel modules (requires `const_trait_impl`).
+/// ```ignore
+/// #![feature(const_trait_impl)]
+/// # use kernel::{define_id_array, driver::RawDeviceId};
+///
+/// #[derive(Copy, Clone)]
+/// struct Id(u32);
+///
+/// // SAFETY: `ZERO` is all zeroes and `to_rawid` stores `offset` as the second element of the raw
+/// // device id pair.
+/// unsafe impl const RawDeviceId for Id {
+/// type RawType = (u64, isize);
+/// const ZERO: Self::RawType = (0, 0);
+/// fn to_rawid(&self, offset: isize) -> Self::RawType {
+/// (self.0 as u64 + 1, offset)
+/// }
+/// }
+///
+/// define_id_array!(A1, Id, (), []);
+/// define_id_array!(A2, Id, &'static [u8], [(Id(10), None)]);
+/// define_id_array!(A3, Id, &'static [u8], [(Id(10), Some(b"id1")), ]);
+/// define_id_array!(A4, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2"))]);
+/// define_id_array!(A5, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2")), ]);
+/// define_id_array!(A6, Id, &'static [u8], [(Id(10), None), (Id(20), Some(b"id2")), ]);
+/// define_id_array!(A7, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), None), ]);
+/// define_id_array!(A8, Id, &'static [u8], [(Id(10), None), (Id(20), None), ]);
+/// ```
+#[macro_export]
+macro_rules! define_id_array {
+ ($table_name:ident, $id_type:ty, $data_type:ty, [ $($t:tt)* ]) => {
+ const $table_name:
+ $crate::driver::IdArray<$id_type, $data_type, { $crate::count_paren_items!($($t)*) }> =
+ $crate::driver::IdArray::new(
+ $crate::first_item!($id_type, $($t)*), $crate::second_item!($($t)*));
+ };
+}
+
+/// Defines a new constant [`IdTable`] with a concise syntax.
+///
+/// It is meant to be used by buses and subsystems to create a similar macro with their device id
+/// type already specified, i.e., with fewer parameters to the end user.
+///
+/// # Examples
+///
+// TODO: Exported but not usable by kernel modules (requires `const_trait_impl`).
+/// ```ignore
+/// #![feature(const_trait_impl)]
+/// # use kernel::{define_id_table, driver::RawDeviceId};
+///
+/// #[derive(Copy, Clone)]
+/// struct Id(u32);
+///
+/// // SAFETY: `ZERO` is all zeroes and `to_rawid` stores `offset` as the second element of the raw
+/// // device id pair.
+/// unsafe impl const RawDeviceId for Id {
+/// type RawType = (u64, isize);
+/// const ZERO: Self::RawType = (0, 0);
+/// fn to_rawid(&self, offset: isize) -> Self::RawType {
+/// (self.0 as u64 + 1, offset)
+/// }
+/// }
+///
+/// define_id_table!(T1, Id, &'static [u8], [(Id(10), None)]);
+/// define_id_table!(T2, Id, &'static [u8], [(Id(10), Some(b"id1")), ]);
+/// define_id_table!(T3, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2"))]);
+/// define_id_table!(T4, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2")), ]);
+/// define_id_table!(T5, Id, &'static [u8], [(Id(10), None), (Id(20), Some(b"id2")), ]);
+/// define_id_table!(T6, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), None), ]);
+/// define_id_table!(T7, Id, &'static [u8], [(Id(10), None), (Id(20), None), ]);
+/// ```
+#[macro_export]
+macro_rules! define_id_table {
+ ($table_name:ident, $id_type:ty, $data_type:ty, [ $($t:tt)* ]) => {
+ const $table_name: Option<$crate::driver::IdTable<'static, $id_type, $data_type>> = {
+ $crate::define_id_array!(ARRAY, $id_type, $data_type, [ $($t)* ]);
+ Some(ARRAY.as_table())
+ };
+ };
+}
+
+/// Custom code within device removal.
+pub trait DeviceRemoval {
+ /// Cleans resources up when the device is removed.
+ ///
+ /// This is called when a device is removed and offers implementers the chance to run some code
+ /// that cleans state up.
+ fn device_remove(&self);
+}
+
+impl DeviceRemoval for () {
+ fn device_remove(&self) {}
+}
+
+impl<T: DeviceRemoval> DeviceRemoval for Ref<T> {
+ fn device_remove(&self) {
+ self.deref().device_remove();
+ }
+}
+
+impl<T: DeviceRemoval> DeviceRemoval for Box<T> {
+ fn device_remove(&self) {
+ self.deref().device_remove();
+ }
+}
+
+/// A kernel module that only registers the given driver on init.
+///
+/// This is a helper struct to make it easier to define single-functionality modules, in this case,
+/// modules that offer a single driver.
+pub struct Module<T: DriverOps> {
+ _driver: Pin<Box<Registration<T>>>,
+}
+
+impl<T: DriverOps> crate::Module for Module<T> {
+ fn init(name: &'static CStr, module: &'static ThisModule) -> Result<Self> {
+ Ok(Self {
+ _driver: Registration::new_pinned(name, module)?,
+ })
+ }
+}
+
+/// Declares a kernel module that exposes a single driver.
+///
+/// It is meant to be used as a helper by other subsystems so they can more easily expose their own
+/// macros.
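+///
+/// # Examples
+///
+/// A sketch of how a subsystem-provided driver might invoke this macro (the `platform::Adapter`
+/// and `MyDriver` names are illustrative):
+///
+/// ```ignore
+/// module_driver! {
+///     <T>,
+///     platform::Adapter<T>,
+///     {
+///         type: MyDriver,
+///         name: b"my_driver",
+///         license: b"GPL",
+///     }
+/// }
+/// ```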
+#[macro_export]
+macro_rules! module_driver {
+ (<$gen_type:ident>, $driver_ops:ty, { type: $type:ty, $($f:tt)* }) => {
+ type Ops<$gen_type> = $driver_ops;
+ type ModuleType = $crate::driver::Module<Ops<$type>>;
+ $crate::prelude::module! {
+ type: ModuleType,
+ $($f)*
+ }
+ }
+}
diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs
new file mode 100644
index 000000000000..f968aa91ddf2
--- /dev/null
+++ b/rust/kernel/error.rs
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel errors.
+//!
+//! C header: [`include/uapi/asm-generic/errno-base.h`](../../../include/uapi/asm-generic/errno-base.h)
+
+use crate::bindings;
+use crate::str::CStr;
+use alloc::{
+ alloc::{AllocError, LayoutError},
+ collections::TryReserveError,
+};
+use core::convert::From;
+use core::fmt;
+use core::num::TryFromIntError;
+use core::str::{self, Utf8Error};
+
+/// Contains the C-compatible error codes.
+pub mod code {
+ macro_rules! declare_err {
+ ($err:tt $(,)? $($doc:expr),+) => {
+ $(
+ #[doc = $doc]
+ )*
+ pub const $err: super::Error = super::Error(-(crate::bindings::$err as i32));
+ };
+ }
+
+ declare_err!(EPERM, "Operation not permitted.");
+
+ declare_err!(ENOENT, "No such file or directory.");
+
+ declare_err!(ESRCH, "No such process.");
+
+ declare_err!(EINTR, "Interrupted system call.");
+
+ declare_err!(EIO, "I/O error.");
+
+ declare_err!(ENXIO, "No such device or address.");
+
+ declare_err!(E2BIG, "Argument list too long.");
+
+ declare_err!(ENOEXEC, "Exec format error.");
+
+ declare_err!(EBADF, "Bad file number.");
+
+ declare_err!(ECHILD, "No child processes.");
+
+ declare_err!(EAGAIN, "Try again.");
+
+ declare_err!(ENOMEM, "Out of memory.");
+
+ declare_err!(EACCES, "Permission denied.");
+
+ declare_err!(EFAULT, "Bad address.");
+
+ declare_err!(ENOTBLK, "Block device required.");
+
+ declare_err!(EBUSY, "Device or resource busy.");
+
+ declare_err!(EEXIST, "File exists.");
+
+ declare_err!(EXDEV, "Cross-device link.");
+
+ declare_err!(ENODEV, "No such device.");
+
+ declare_err!(ENOTDIR, "Not a directory.");
+
+ declare_err!(EISDIR, "Is a directory.");
+
+ declare_err!(EINVAL, "Invalid argument.");
+
+ declare_err!(ENFILE, "File table overflow.");
+
+ declare_err!(EMFILE, "Too many open files.");
+
+ declare_err!(ENOTTY, "Not a typewriter.");
+
+ declare_err!(ETXTBSY, "Text file busy.");
+
+ declare_err!(EFBIG, "File too large.");
+
+ declare_err!(ENOSPC, "No space left on device.");
+
+ declare_err!(ESPIPE, "Illegal seek.");
+
+ declare_err!(EROFS, "Read-only file system.");
+
+ declare_err!(EMLINK, "Too many links.");
+
+ declare_err!(EPIPE, "Broken pipe.");
+
+ declare_err!(EDOM, "Math argument out of domain of func.");
+
+ declare_err!(ERANGE, "Math result not representable.");
+
+ declare_err!(EDEADLK, "Resource deadlock would occur.");
+
+ declare_err!(ENAMETOOLONG, "File name too long.");
+
+ declare_err!(ENOLCK, "No record locks available.");
+
+ declare_err!(
+ ENOSYS,
+ "Invalid system call number.",
+ "",
+ "This error code is special: arch syscall entry code will return",
+ "[`ENOSYS`] if users try to call a syscall that doesn't exist.",
+ "To keep failures of syscalls that really do exist distinguishable from",
+ "failures due to attempts to use a nonexistent syscall, syscall",
+ "implementations should refrain from returning [`ENOSYS`]."
+ );
+
+ declare_err!(ENOTEMPTY, "Directory not empty.");
+
+ declare_err!(ELOOP, "Too many symbolic links encountered.");
+
+ declare_err!(EWOULDBLOCK, "Operation would block.");
+
+ declare_err!(ENOMSG, "No message of desired type.");
+
+ declare_err!(EIDRM, "Identifier removed.");
+
+ declare_err!(ECHRNG, "Channel number out of range.");
+
+ declare_err!(EL2NSYNC, "Level 2 not synchronized.");
+
+ declare_err!(EL3HLT, "Level 3 halted.");
+
+ declare_err!(EL3RST, "Level 3 reset.");
+
+ declare_err!(ELNRNG, "Link number out of range.");
+
+ declare_err!(EUNATCH, "Protocol driver not attached.");
+
+ declare_err!(ENOCSI, "No CSI structure available.");
+
+ declare_err!(EL2HLT, "Level 2 halted.");
+
+ declare_err!(EBADE, "Invalid exchange.");
+
+ declare_err!(EBADR, "Invalid request descriptor.");
+
+ declare_err!(EXFULL, "Exchange full.");
+
+ declare_err!(ENOANO, "No anode.");
+
+ declare_err!(EBADRQC, "Invalid request code.");
+
+ declare_err!(EBADSLT, "Invalid slot.");
+
+ declare_err!(EDEADLOCK, "Resource deadlock would occur.");
+
+ declare_err!(EBFONT, "Bad font file format.");
+
+ declare_err!(ENOSTR, "Device not a stream.");
+
+ declare_err!(ENODATA, "No data available.");
+
+ declare_err!(ETIME, "Timer expired.");
+
+ declare_err!(ENOSR, "Out of streams resources.");
+
+ declare_err!(ENONET, "Machine is not on the network.");
+
+ declare_err!(ENOPKG, "Package not installed.");
+
+ declare_err!(EREMOTE, "Object is remote.");
+
+ declare_err!(ENOLINK, "Link has been severed.");
+
+ declare_err!(EADV, "Advertise error.");
+
+ declare_err!(ESRMNT, "Srmount error.");
+
+ declare_err!(ECOMM, "Communication error on send.");
+
+ declare_err!(EPROTO, "Protocol error.");
+
+ declare_err!(EMULTIHOP, "Multihop attempted.");
+
+ declare_err!(EDOTDOT, "RFS specific error.");
+
+ declare_err!(EBADMSG, "Not a data message.");
+
+ declare_err!(EOVERFLOW, "Value too large for defined data type.");
+
+ declare_err!(ENOTUNIQ, "Name not unique on network.");
+
+ declare_err!(EBADFD, "File descriptor in bad state.");
+
+ declare_err!(EREMCHG, "Remote address changed.");
+
+ declare_err!(ELIBACC, "Can not access a needed shared library.");
+
+ declare_err!(ELIBBAD, "Accessing a corrupted shared library.");
+
+ declare_err!(ELIBSCN, ".lib section in a.out corrupted.");
+
+ declare_err!(ELIBMAX, "Attempting to link in too many shared libraries.");
+
+ declare_err!(ELIBEXEC, "Cannot exec a shared library directly.");
+
+ declare_err!(EILSEQ, "Illegal byte sequence.");
+
+ declare_err!(ERESTART, "Interrupted system call should be restarted.");
+
+ declare_err!(ESTRPIPE, "Streams pipe error.");
+
+ declare_err!(EUSERS, "Too many users.");
+
+ declare_err!(ENOTSOCK, "Socket operation on non-socket.");
+
+ declare_err!(EDESTADDRREQ, "Destination address required.");
+
+ declare_err!(EMSGSIZE, "Message too long.");
+
+ declare_err!(EPROTOTYPE, "Protocol wrong type for socket.");
+
+ declare_err!(ENOPROTOOPT, "Protocol not available.");
+
+ declare_err!(EPROTONOSUPPORT, "Protocol not supported.");
+
+ declare_err!(ESOCKTNOSUPPORT, "Socket type not supported.");
+
+ declare_err!(EOPNOTSUPP, "Operation not supported on transport endpoint.");
+
+ declare_err!(EPFNOSUPPORT, "Protocol family not supported.");
+
+ declare_err!(EAFNOSUPPORT, "Address family not supported by protocol.");
+
+ declare_err!(EADDRINUSE, "Address already in use.");
+
+ declare_err!(EADDRNOTAVAIL, "Cannot assign requested address.");
+
+ declare_err!(ENETDOWN, "Network is down.");
+
+ declare_err!(ENETUNREACH, "Network is unreachable.");
+
+ declare_err!(ENETRESET, "Network dropped connection because of reset.");
+
+ declare_err!(ECONNABORTED, "Software caused connection abort.");
+
+ declare_err!(ECONNRESET, "Connection reset by peer.");
+
+ declare_err!(ENOBUFS, "No buffer space available.");
+
+ declare_err!(EISCONN, "Transport endpoint is already connected.");
+
+ declare_err!(ENOTCONN, "Transport endpoint is not connected.");
+
+ declare_err!(ESHUTDOWN, "Cannot send after transport endpoint shutdown.");
+
+ declare_err!(ETOOMANYREFS, "Too many references: cannot splice.");
+
+ declare_err!(ETIMEDOUT, "Connection timed out.");
+
+ declare_err!(ECONNREFUSED, "Connection refused.");
+
+ declare_err!(EHOSTDOWN, "Host is down.");
+
+ declare_err!(EHOSTUNREACH, "No route to host.");
+
+ declare_err!(EALREADY, "Operation already in progress.");
+
+ declare_err!(EINPROGRESS, "Operation now in progress.");
+
+ declare_err!(ESTALE, "Stale file handle.");
+
+ declare_err!(EUCLEAN, "Structure needs cleaning.");
+
+ declare_err!(ENOTNAM, "Not a XENIX named type file.");
+
+ declare_err!(ENAVAIL, "No XENIX semaphores available.");
+
+ declare_err!(EISNAM, "Is a named type file.");
+
+ declare_err!(EREMOTEIO, "Remote I/O error.");
+
+ declare_err!(EDQUOT, "Quota exceeded.");
+
+ declare_err!(ENOMEDIUM, "No medium found.");
+
+ declare_err!(EMEDIUMTYPE, "Wrong medium type.");
+
+ declare_err!(ECANCELED, "Operation Canceled.");
+
+ declare_err!(ENOKEY, "Required key not available.");
+
+ declare_err!(EKEYEXPIRED, "Key has expired.");
+
+ declare_err!(EKEYREVOKED, "Key has been revoked.");
+
+ declare_err!(EKEYREJECTED, "Key was rejected by service.");
+
+ declare_err!(EOWNERDEAD, "Owner died.", "", "For robust mutexes.");
+
+ declare_err!(ENOTRECOVERABLE, "State not recoverable.");
+
+ declare_err!(ERFKILL, "Operation not possible due to RF-kill.");
+
+ declare_err!(EHWPOISON, "Memory page has hardware error.");
+
+ declare_err!(ERESTARTSYS, "Restart the system call.");
+
+ declare_err!(ENOTSUPP, "Operation is not supported.");
+
+ declare_err!(ENOPARAM, "Parameter not supported.");
+}
+
+/// Generic integer kernel error.
+///
+/// The kernel defines a set of integer generic error codes based on C and
+/// POSIX ones. These codes may have a more specific meaning in some contexts.
+///
+/// # Invariants
+///
+/// The value is a valid `errno` (i.e. `>= -MAX_ERRNO && < 0`).
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct Error(core::ffi::c_int);
+
+impl Error {
+ /// Creates an [`Error`] from a kernel error code.
+ ///
+ /// It is a bug to pass an out-of-range `errno`. `EINVAL` would
+ /// be returned in such a case.
+ pub(crate) fn from_kernel_errno(errno: core::ffi::c_int) -> Error {
+ if errno < -(bindings::MAX_ERRNO as i32) || errno >= 0 {
+ // TODO: Make it a `WARN_ONCE` once available.
+ crate::pr_warn!(
+ "attempted to create `Error` with out of range `errno`: {}",
+ errno
+ );
+ return code::EINVAL;
+ }
+
+ // INVARIANT: The check above ensures the type invariant
+ // will hold.
+ Error(errno)
+ }
+
+ /// Creates an [`Error`] from a kernel error code.
+ ///
+ /// # Safety
+ ///
+ /// `errno` must be within error code range (i.e. `>= -MAX_ERRNO && < 0`).
+ pub(crate) unsafe fn from_kernel_errno_unchecked(errno: core::ffi::c_int) -> Error {
+ // INVARIANT: The contract ensures the type invariant
+ // will hold.
+ Error(errno)
+ }
+
+ /// Returns the kernel error code.
+ pub fn to_kernel_errno(self) -> core::ffi::c_int {
+ self.0
+ }
+
+ /// Returns a string representing the error, if one exists.
+ #[cfg(not(testlib))]
+ pub fn name(&self) -> Option<&'static CStr> {
+ // SAFETY: Just an FFI call, there are no extra safety requirements.
+ let ptr = unsafe { bindings::errname(-self.0) };
+ if ptr.is_null() {
+ None
+ } else {
+ // SAFETY: The string returned by `errname` is static and `NUL`-terminated.
+ Some(unsafe { CStr::from_char_ptr(ptr) })
+ }
+ }
+
+ /// Returns a string representing the error, if one exists.
+ ///
+ /// When `testlib` is configured, this always returns `None` to avoid the dependency on a
+ /// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still
+ /// run in userspace.
+ #[cfg(testlib)]
+ pub fn name(&self) -> Option<&'static CStr> {
+ None
+ }
+}
+
+impl fmt::Debug for Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.name() {
+ // Print out number if no name can be found.
+ None => f.debug_tuple("Error").field(&-self.0).finish(),
+ // SAFETY: These strings are ASCII-only.
+ Some(name) => f
+ .debug_tuple(unsafe { str::from_utf8_unchecked(name) })
+ .finish(),
+ }
+ }
+}
+
+impl From<TryFromIntError> for Error {
+ fn from(_: TryFromIntError) -> Error {
+ code::EINVAL
+ }
+}
+
+impl From<Utf8Error> for Error {
+ fn from(_: Utf8Error) -> Error {
+ code::EINVAL
+ }
+}
+
+impl From<TryReserveError> for Error {
+ fn from(_: TryReserveError) -> Error {
+ code::ENOMEM
+ }
+}
+
+impl From<LayoutError> for Error {
+ fn from(_: LayoutError) -> Error {
+ code::ENOMEM
+ }
+}
+
+impl From<core::fmt::Error> for Error {
+ fn from(_: core::fmt::Error) -> Error {
+ code::EINVAL
+ }
+}
+
+impl From<core::convert::Infallible> for Error {
+ fn from(e: core::convert::Infallible) -> Error {
+ match e {}
+ }
+}
+
+/// A [`Result`] with an [`Error`] error type.
+///
+/// To be used as the return type for functions that may fail.
+///
+/// # Error codes in C and Rust
+///
+/// In C, it is common that functions indicate success or failure through
+/// their return value, while modifying or returning extra data through non-`const`
+/// pointer parameters. In particular, in the kernel, functions that may fail
+/// typically return an `int` that represents a generic error code. We model
+/// those as [`Error`].
+///
+/// In Rust, it is idiomatic to model functions that may fail as returning
+/// a [`Result`]. Since in the kernel many functions return an error code,
+/// [`Result`] is a type alias for a [`core::result::Result`] that uses
+/// [`Error`] as its error type.
+///
+/// Note that even if a function does not return anything when it succeeds,
+/// it should still be modeled as returning a `Result` rather than
+/// just an [`Error`].
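+///
+/// # Examples
+///
+/// A minimal sketch of a fallible function that returns nothing on success:
+///
+/// ```
+/// # use kernel::error::{code::EINVAL, Result};
+/// fn check_len(len: usize) -> Result {
+///     if len > 4096 {
+///         return Err(EINVAL);
+///     }
+///     Ok(())
+/// }
+/// # assert!(check_len(16).is_ok());
+/// # assert!(check_len(65536).is_err());
+/// ```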
+pub type Result<T = ()> = core::result::Result<T, Error>;
+
+impl From<AllocError> for Error {
+ fn from(_: AllocError) -> Error {
+ code::ENOMEM
+ }
+}
+
+// # Invariant: `-bindings::MAX_ERRNO` fits in an `i16`.
+crate::static_assert!(bindings::MAX_ERRNO <= -(i16::MIN as i32) as u32);
+
+pub(crate) fn from_kernel_result_helper<T>(r: Result<T>) -> T
+where
+ T: From<i16>,
+{
+ match r {
+ Ok(v) => v,
+ // NO-OVERFLOW: negative `errno`s are no smaller than `-bindings::MAX_ERRNO`,
+ // `-bindings::MAX_ERRNO` fits in an `i16` as per invariant above,
+ // therefore a negative `errno` always fits in an `i16` and will not overflow.
+ Err(e) => T::from(e.to_kernel_errno() as i16),
+ }
+}
+
+/// Transforms a [`crate::error::Result<T>`] to a kernel C integer result.
+///
+/// This is useful when calling Rust functions that return [`crate::error::Result<T>`]
+/// from inside `extern "C"` functions that need to return an integer
+/// error result.
+///
+/// `T` should be convertible to an `i16` via `From<i16>`.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::from_kernel_result;
+/// # use kernel::bindings;
+/// unsafe extern "C" fn probe_callback(
+/// pdev: *mut bindings::platform_device,
+/// ) -> core::ffi::c_int {
+/// from_kernel_result! {
+/// let ptr = devm_alloc(pdev)?;
+/// bindings::platform_set_drvdata(pdev, ptr);
+/// Ok(0)
+/// }
+/// }
+/// ```
+macro_rules! from_kernel_result {
+ ($($tt:tt)*) => {{
+ $crate::error::from_kernel_result_helper((|| {
+ $($tt)*
+ })())
+ }};
+}
+
+pub(crate) use from_kernel_result;
+
+/// Transforms a kernel "error pointer" to a normal pointer.
+///
+/// Some kernel C API functions return an "error pointer" which optionally
+/// embeds an `errno`. Callers are supposed to check the returned pointer
+/// for errors. This function performs the check and converts the "error pointer"
+/// to a normal pointer in an idiomatic fashion.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::from_kernel_err_ptr;
+/// # use kernel::bindings;
+/// fn devm_platform_ioremap_resource(
+/// pdev: &mut PlatformDevice,
+/// index: u32,
+/// ) -> Result<*mut core::ffi::c_void> {
+/// // SAFETY: FFI call.
+/// unsafe {
+/// from_kernel_err_ptr(bindings::devm_platform_ioremap_resource(
+/// pdev.to_ptr(),
+/// index,
+/// ))
+/// }
+/// }
+/// ```
+// TODO: Remove `dead_code` marker once an in-kernel client is available.
+#[allow(dead_code)]
+pub(crate) fn from_kernel_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
+ // CAST: Casting a pointer to `*const core::ffi::c_void` is always valid.
+ let const_ptr: *const core::ffi::c_void = ptr.cast();
+ // SAFETY: The FFI function does not deref the pointer.
+ if unsafe { bindings::IS_ERR(const_ptr) } {
+ // SAFETY: The FFI function does not deref the pointer.
+ let err = unsafe { bindings::PTR_ERR(const_ptr) };
+ // CAST: If `IS_ERR()` returns `true`,
+ // then `PTR_ERR()` is guaranteed to return a
+ // negative value greater-or-equal to `-bindings::MAX_ERRNO`,
+ // which always fits in an `i16`, as per the invariant above.
+ // And an `i16` always fits in an `i32`. So casting `err` to
+ // an `i32` can never overflow, and is always valid.
+ //
+ // SAFETY: `IS_ERR()` ensures `err` is a
+ // negative value greater-or-equal to `-bindings::MAX_ERRNO`.
+ return Err(unsafe { Error::from_kernel_errno_unchecked(err as i32) });
+ }
+ Ok(ptr)
+}
+
+/// Converts an integer as returned by a C kernel function to an error if it's negative, and
+/// `Ok(())` otherwise.
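+///
+/// # Examples
+///
+/// A minimal sketch of the conversion (`-22` is `-EINVAL` on the usual configurations):
+///
+/// ```
+/// # use kernel::error::{code::EINVAL, to_result};
+/// assert!(to_result(0).is_ok());
+/// assert_eq!(to_result(-22), Err(EINVAL));
+/// ```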
+pub fn to_result(err: core::ffi::c_int) -> Result {
+ if err < 0 {
+ Err(Error::from_kernel_errno(err))
+ } else {
+ Ok(())
+ }
+}
diff --git a/rust/kernel/file.rs b/rust/kernel/file.rs
new file mode 100644
index 000000000000..62538e6b3eea
--- /dev/null
+++ b/rust/kernel/file.rs
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Files and file descriptors.
+//!
+//! C headers: [`include/linux/fs.h`](../../../../include/linux/fs.h) and
+//! [`include/linux/file.h`](../../../../include/linux/file.h)
+
+use crate::{
+ bindings,
+ cred::Credential,
+ error::{code::*, from_kernel_result, Error, Result},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ iov_iter::IovIter,
+ mm,
+ sync::CondVar,
+ types::PointerWrapper,
+ user_ptr::{UserSlicePtr, UserSlicePtrReader, UserSlicePtrWriter},
+ ARef, AlwaysRefCounted,
+};
+use core::convert::{TryFrom, TryInto};
+use core::{cell::UnsafeCell, marker, mem, ptr};
+use macros::vtable;
+
+/// Flags associated with a [`File`].
+pub mod flags {
+ /// File is opened in append mode.
+ pub const O_APPEND: u32 = bindings::O_APPEND;
+
+ /// Signal-driven I/O is enabled.
+ pub const O_ASYNC: u32 = bindings::FASYNC;
+
+ /// Close-on-exec flag is set.
+ pub const O_CLOEXEC: u32 = bindings::O_CLOEXEC;
+
+ /// File was created if it didn't already exist.
+ pub const O_CREAT: u32 = bindings::O_CREAT;
+
+ /// Direct I/O is enabled for this file.
+ pub const O_DIRECT: u32 = bindings::O_DIRECT;
+
+ /// File must be a directory.
+ pub const O_DIRECTORY: u32 = bindings::O_DIRECTORY;
+
+ /// Like [`O_SYNC`] except metadata is not synced.
+ pub const O_DSYNC: u32 = bindings::O_DSYNC;
+
+ /// Ensure that this file is created with the `open(2)` call.
+ pub const O_EXCL: u32 = bindings::O_EXCL;
+
+ /// Large file size enabled (`off64_t` over `off_t`).
+ pub const O_LARGEFILE: u32 = bindings::O_LARGEFILE;
+
+ /// Do not update the file last access time.
+ pub const O_NOATIME: u32 = bindings::O_NOATIME;
+
+ /// File should not be used as process's controlling terminal.
+ pub const O_NOCTTY: u32 = bindings::O_NOCTTY;
+
+ /// If basename of path is a symbolic link, fail open.
+ pub const O_NOFOLLOW: u32 = bindings::O_NOFOLLOW;
+
+ /// File is using nonblocking I/O.
+ pub const O_NONBLOCK: u32 = bindings::O_NONBLOCK;
+
+ /// Also known as `O_NDELAY`.
+ ///
+ /// This is effectively the same flag as [`O_NONBLOCK`] on all architectures
+ /// except SPARC64.
+ pub const O_NDELAY: u32 = bindings::O_NDELAY;
+
+ /// Used to obtain a path file descriptor.
+ pub const O_PATH: u32 = bindings::O_PATH;
+
+ /// Write operations on this file will flush data and metadata.
+ pub const O_SYNC: u32 = bindings::O_SYNC;
+
+ /// This file is an unnamed temporary regular file.
+ pub const O_TMPFILE: u32 = bindings::O_TMPFILE;
+
+ /// File should be truncated to length 0.
+ pub const O_TRUNC: u32 = bindings::O_TRUNC;
+
+ /// Bitmask for access mode flags.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use kernel::file;
+ /// # fn do_something() {}
+ /// # let flags = 0;
+ /// if (flags & file::flags::O_ACCMODE) == file::flags::O_RDONLY {
+ /// do_something();
+ /// }
+ /// ```
+ pub const O_ACCMODE: u32 = bindings::O_ACCMODE;
+
+ /// File is read only.
+ pub const O_RDONLY: u32 = bindings::O_RDONLY;
+
+ /// File is write only.
+ pub const O_WRONLY: u32 = bindings::O_WRONLY;
+
+ /// File can be both read and written.
+ pub const O_RDWR: u32 = bindings::O_RDWR;
+}
+
+/// Wraps the kernel's `struct file`.
+///
+/// # Invariants
+///
+/// Instances of this type are always ref-counted, that is, a call to `get_file` ensures that the
+/// allocation remains valid at least until the matching call to `fput`.
+#[repr(transparent)]
+pub struct File(pub(crate) UnsafeCell<bindings::file>);
+
+// TODO: Accessing fields of `struct file` through the pointer is UB because other threads may be
+// writing to them. However, this is how the C code currently operates: naked reads and writes to
+// fields. Even if we used relaxed atomics on the Rust side, we can't force this on the C side.
+impl File {
+ /// Constructs a new [`struct file`] wrapper from a file descriptor.
+ ///
+ /// The file descriptor belongs to the current process.
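+ ///
+ /// # Examples
+ ///
+ /// A sketch of resolving a descriptor received from user space (the surrounding driver context
+ /// is assumed; error handling elided):
+ ///
+ /// ```ignore
+ /// # use kernel::file::File;
+ /// fn inspect(fd: u32) -> kernel::error::Result {
+ ///     let file = File::from_fd(fd)?;
+ ///     kernel::pr_info!("file flags: {:#x}\n", file.flags());
+ ///     Ok(())
+ /// }
+ /// ```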
+ pub fn from_fd(fd: u32) -> Result<ARef<Self>> {
+ // SAFETY: FFI call, there are no requirements on `fd`.
+ let ptr = ptr::NonNull::new(unsafe { bindings::fget(fd) }).ok_or(EBADF)?;
+
+ // SAFETY: `fget` increments the refcount before returning.
+ Ok(unsafe { ARef::from_raw(ptr.cast()) })
+ }
+
+ /// Creates a reference to a [`File`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
+ /// returned [`File`] instance.
+ pub(crate) unsafe fn from_ptr<'a>(ptr: *const bindings::file) -> &'a File {
+ // SAFETY: The safety requirements guarantee the validity of the dereference, while the
+ // `File` type being transparent makes the cast ok.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Returns the current seek/cursor/pointer position (`struct file::f_pos`).
+ pub fn pos(&self) -> u64 {
+ // SAFETY: The file is valid because the shared reference guarantees a nonzero refcount.
+ unsafe { core::ptr::addr_of!((*self.0.get()).f_pos).read() as _ }
+ }
+
+ /// Returns the credentials of the task that originally opened the file.
+ pub fn cred(&self) -> &Credential {
+ // SAFETY: The file is valid because the shared reference guarantees a nonzero refcount.
+ let ptr = unsafe { core::ptr::addr_of!((*self.0.get()).f_cred).read() };
+ // SAFETY: The lifetimes of `self` and `Credential` are tied, so it is guaranteed that
+ // the credential pointer remains valid (because the file is still alive, and it doesn't
+ // change over the lifetime of a file).
+ unsafe { Credential::from_ptr(ptr) }
+ }
+
+ /// Returns the flags associated with the file.
+ ///
+ /// The flags are a combination of the constants in [`flags`].
+ pub fn flags(&self) -> u32 {
+ // SAFETY: The file is valid because the shared reference guarantees a nonzero refcount.
+ unsafe { core::ptr::addr_of!((*self.0.get()).f_flags).read() }
+ }
+}
+
+// SAFETY: The type invariants guarantee that `File` is always ref-counted.
+unsafe impl AlwaysRefCounted for File {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_file(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::fput(obj.cast().as_ptr()) }
+ }
+}
+
+/// A file descriptor reservation.
+///
+/// This allows the creation of a file descriptor in two steps: first, we reserve a slot for it,
+/// then we commit or drop the reservation. The first step may fail (e.g., the current process ran
+/// out of available slots), but commit and drop never fail (and are mutually exclusive).
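+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch of the two-step flow, assuming the caller already holds a
+ /// ref-counted `ARef<File>` and passes `0` as the reservation flags:
+ ///
+ /// ```ignore
+ /// # use kernel::file::{File, FileDescriptorReservation};
+ /// fn install(file: ARef<File>) -> Result<u32> {
+ /// // Step 1: reserve a slot; this is the only step that can fail.
+ /// let reservation = FileDescriptorReservation::new(0)?;
+ /// let fd = reservation.reserved_fd();
+ /// // Step 2: bind the reserved slot to the file; this never fails.
+ /// reservation.commit(file);
+ /// Ok(fd)
+ /// }
+ /// ```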
+pub struct FileDescriptorReservation {
+ fd: u32,
+}
+
+impl FileDescriptorReservation {
+ /// Creates a new file descriptor reservation.
+ pub fn new(flags: u32) -> Result<Self> {
+ // SAFETY: FFI call, there are no safety requirements on `flags`.
+ let fd = unsafe { bindings::get_unused_fd_flags(flags) };
+ if fd < 0 {
+ return Err(Error::from_kernel_errno(fd));
+ }
+ Ok(Self { fd: fd as _ })
+ }
+
+ /// Returns the file descriptor number that was reserved.
+ pub fn reserved_fd(&self) -> u32 {
+ self.fd
+ }
+
+ /// Commits the reservation.
+ ///
+ /// The previously reserved file descriptor is bound to `file`.
+ pub fn commit(self, file: ARef<File>) {
+ // SAFETY: `self.fd` was previously returned by `get_unused_fd_flags`, and `file` is
+ // guaranteed to have an owned refcount by its type invariants.
+ unsafe { bindings::fd_install(self.fd, file.0.get()) };
+
+ // `fd_install` consumes both the file descriptor and the file reference, so we cannot run
+ // the destructors.
+ core::mem::forget(self);
+ core::mem::forget(file);
+ }
+}
+
+impl Drop for FileDescriptorReservation {
+ fn drop(&mut self) {
+ // SAFETY: `self.fd` was returned by a previous call to `get_unused_fd_flags`.
+ unsafe { bindings::put_unused_fd(self.fd) };
+ }
+}
+
+/// Wraps the kernel's `struct poll_table_struct`.
+///
+/// # Invariants
+///
+/// The pointer `PollTable::ptr` is null or valid.
+pub struct PollTable {
+ ptr: *mut bindings::poll_table_struct,
+}
+
+impl PollTable {
+ /// Constructs a new `struct poll_table_struct` wrapper.
+ ///
+ /// # Safety
+ ///
+ /// The pointer `ptr` must be either null or a valid pointer for the lifetime of the object.
+ unsafe fn from_ptr(ptr: *mut bindings::poll_table_struct) -> Self {
+ Self { ptr }
+ }
+
+ /// Associates the given file and condition variable with this poll table. This means that
+ /// notifying the condition variable will also notify the poll table; additionally, the
+ /// association between the condition variable and the file will automatically be undone by the
+ /// kernel when the file is destroyed. To unilaterally remove the association before then, one
+ /// can call [`CondVar::free_waiters`].
+ ///
+ /// # Safety
+ ///
+ /// If the condition variable is destroyed before the file, then [`CondVar::free_waiters`] must
+ /// be called to ensure that all waiters are flushed out.
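+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch of an [`Operations::poll`] implementation for a hypothetical
+ /// `MyDevice` type whose data holds a [`CondVar`] named `changed`:
+ ///
+ /// ```ignore
+ /// fn poll(data: &MyDevice, file: &File, table: &PollTable) -> Result<u32> {
+ /// // SAFETY: `MyDevice` calls `CondVar::free_waiters` before `changed` is destroyed.
+ /// unsafe { table.register_wait(file, &data.changed) };
+ /// Ok(0)
+ /// }
+ /// ```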
+ pub unsafe fn register_wait<'a>(&self, file: &'a File, cv: &'a CondVar) {
+ if self.ptr.is_null() {
+ return;
+ }
+
+ // SAFETY: `PollTable::ptr` is guaranteed to be valid by the type invariants and the null
+ // check above.
+ let table = unsafe { &*self.ptr };
+ if let Some(proc) = table._qproc {
+ // SAFETY: All pointers are known to be valid.
+ unsafe { proc(file.0.get() as _, cv.wait_list.get(), self.ptr) }
+ }
+ }
+}
+
+/// Equivalent to [`std::io::SeekFrom`].
+///
+/// [`std::io::SeekFrom`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
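+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch of how an [`Operations::seek`] implementation might resolve a
+ /// [`SeekFrom`] against a known file size and current position:
+ ///
+ /// ```ignore
+ /// fn resolve(size: i64, pos: i64, offset: SeekFrom) -> Result<u64> {
+ /// let new = match offset {
+ /// SeekFrom::Start(off) => i64::try_from(off)?,
+ /// SeekFrom::End(off) => size.checked_add(off).ok_or(EINVAL)?,
+ /// SeekFrom::Current(off) => pos.checked_add(off).ok_or(EINVAL)?,
+ /// };
+ /// u64::try_from(new).map_err(|_| EINVAL)
+ /// }
+ /// ```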
+pub enum SeekFrom {
+ /// Equivalent to C's `SEEK_SET`.
+ Start(u64),
+
+ /// Equivalent to C's `SEEK_END`.
+ End(i64),
+
+ /// Equivalent to C's `SEEK_CUR`.
+ Current(i64),
+}
+
+pub(crate) struct OperationsVtable<A, T>(marker::PhantomData<A>, marker::PhantomData<T>);
+
+impl<A: OpenAdapter<T::OpenData>, T: Operations> OperationsVtable<A, T> {
+ /// Called by the VFS when an inode should be opened.
+ ///
+ /// Calls `T::open` on the returned value of `A::convert`.
+ ///
+ /// # Safety
+ ///
+ /// The value returned by `A::convert` must be a valid non-null pointer and
+ /// `T::open` must return a valid non-null pointer on an `Ok` result.
+ unsafe extern "C" fn open_callback(
+ inode: *mut bindings::inode,
+ file: *mut bindings::file,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `A::convert` must return a valid non-null pointer that
+ // should point to data in the inode or file that lives longer
+ // than the following use of `T::open`.
+ let arg = unsafe { A::convert(inode, file) };
+ // SAFETY: The C contract guarantees that `file` is valid. Additionally,
+ // `fileref` never outlives this function, so it is guaranteed to be
+ // valid.
+ let fileref = unsafe { File::from_ptr(file) };
+ // SAFETY: `arg` was previously returned by `A::convert` and must
+ // be a valid non-null pointer.
+ let ptr = T::open(unsafe { &*arg }, fileref)?.into_pointer();
+ // SAFETY: The C contract guarantees that `private_data` is available
+ // for implementers of the file operations (no other C code accesses
+ // it), so we know that there are no concurrent threads/CPUs accessing
+ // it (it's not visible to any other Rust code).
+ unsafe { (*file).private_data = ptr as *mut core::ffi::c_void };
+ Ok(0)
+ }
+ }
+
+ unsafe extern "C" fn read_callback(
+ file: *mut bindings::file,
+ buf: *mut core::ffi::c_char,
+ len: core::ffi::c_size_t,
+ offset: *mut bindings::loff_t,
+ ) -> core::ffi::c_ssize_t {
+ from_kernel_result! {
+ let mut data =
+ unsafe { UserSlicePtr::new(buf as *mut core::ffi::c_void, len).writer() };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ // No `FMODE_UNSIGNED_OFFSET` support, so `offset` must be in [0, 2^63).
+ // See <https://github.com/fishinabarrel/linux-kernel-module-rust/pull/113>.
+ let read = T::read(
+ f,
+ unsafe { File::from_ptr(file) },
+ &mut data,
+ unsafe { *offset }.try_into()?,
+ )?;
+ unsafe { (*offset) += bindings::loff_t::try_from(read).unwrap() };
+ Ok(read as _)
+ }
+ }
+
+ unsafe extern "C" fn read_iter_callback(
+ iocb: *mut bindings::kiocb,
+ raw_iter: *mut bindings::iov_iter,
+ ) -> isize {
+ from_kernel_result! {
+ let mut iter = unsafe { IovIter::from_ptr(raw_iter) };
+ let file = unsafe { (*iocb).ki_filp };
+ let offset = unsafe { (*iocb).ki_pos };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ let read = T::read(
+ f,
+ unsafe { File::from_ptr(file) },
+ &mut iter,
+ offset.try_into()?,
+ )?;
+ unsafe { (*iocb).ki_pos += bindings::loff_t::try_from(read).unwrap() };
+ Ok(read as _)
+ }
+ }
+
+ unsafe extern "C" fn write_callback(
+ file: *mut bindings::file,
+ buf: *const core::ffi::c_char,
+ len: core::ffi::c_size_t,
+ offset: *mut bindings::loff_t,
+ ) -> core::ffi::c_ssize_t {
+ from_kernel_result! {
+ let mut data =
+ unsafe { UserSlicePtr::new(buf as *mut core::ffi::c_void, len).reader() };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ // No `FMODE_UNSIGNED_OFFSET` support, so `offset` must be in [0, 2^63).
+ // See <https://github.com/fishinabarrel/linux-kernel-module-rust/pull/113>.
+ let written = T::write(
+ f,
+ unsafe { File::from_ptr(file) },
+ &mut data,
+ unsafe { *offset }.try_into()?,
+ )?;
+ unsafe { (*offset) += bindings::loff_t::try_from(written).unwrap() };
+ Ok(written as _)
+ }
+ }
+
+ unsafe extern "C" fn write_iter_callback(
+ iocb: *mut bindings::kiocb,
+ raw_iter: *mut bindings::iov_iter,
+ ) -> isize {
+ from_kernel_result! {
+ let mut iter = unsafe { IovIter::from_ptr(raw_iter) };
+ let file = unsafe { (*iocb).ki_filp };
+ let offset = unsafe { (*iocb).ki_pos };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ let written = T::write(
+ f,
+ unsafe { File::from_ptr(file) },
+ &mut iter,
+ offset.try_into()?,
+ )?;
+ unsafe { (*iocb).ki_pos += bindings::loff_t::try_from(written).unwrap() };
+ Ok(written as _)
+ }
+ }
+
+ unsafe extern "C" fn release_callback(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+ ) -> core::ffi::c_int {
+ let ptr = mem::replace(unsafe { &mut (*file).private_data }, ptr::null_mut());
+ T::release(unsafe { T::Data::from_pointer(ptr as _) }, unsafe {
+ File::from_ptr(file)
+ });
+ 0
+ }
+
+ unsafe extern "C" fn llseek_callback(
+ file: *mut bindings::file,
+ offset: bindings::loff_t,
+ whence: core::ffi::c_int,
+ ) -> bindings::loff_t {
+ from_kernel_result! {
+ let off = match whence as u32 {
+ bindings::SEEK_SET => SeekFrom::Start(offset.try_into()?),
+ bindings::SEEK_CUR => SeekFrom::Current(offset),
+ bindings::SEEK_END => SeekFrom::End(offset),
+ _ => return Err(EINVAL),
+ };
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ let off = T::seek(f, unsafe { File::from_ptr(file) }, off)?;
+ Ok(off as bindings::loff_t)
+ }
+ }
+
+ unsafe extern "C" fn unlocked_ioctl_callback(
+ file: *mut bindings::file,
+ cmd: core::ffi::c_uint,
+ arg: core::ffi::c_ulong,
+ ) -> core::ffi::c_long {
+ from_kernel_result! {
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ let mut cmd = IoctlCommand::new(cmd as _, arg as _);
+ let ret = T::ioctl(f, unsafe { File::from_ptr(file) }, &mut cmd)?;
+ Ok(ret as _)
+ }
+ }
+
+ unsafe extern "C" fn compat_ioctl_callback(
+ file: *mut bindings::file,
+ cmd: core::ffi::c_uint,
+ arg: core::ffi::c_ulong,
+ ) -> core::ffi::c_long {
+ from_kernel_result! {
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ let mut cmd = IoctlCommand::new(cmd as _, arg as _);
+ let ret = T::compat_ioctl(f, unsafe { File::from_ptr(file) }, &mut cmd)?;
+ Ok(ret as _)
+ }
+ }
+
+ unsafe extern "C" fn mmap_callback(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+
+ // SAFETY: The C API guarantees that `vma` is valid for the duration of this call.
+ // `area` only lives within this call, so it is guaranteed to be valid.
+ let mut area = unsafe { mm::virt::Area::from_ptr(vma) };
+
+ // SAFETY: The C API guarantees that `file` is valid for the duration of this call,
+ // which is longer than the lifetime of the file reference.
+ T::mmap(f, unsafe { File::from_ptr(file) }, &mut area)?;
+ Ok(0)
+ }
+ }
+
+ unsafe extern "C" fn fsync_callback(
+ file: *mut bindings::file,
+ start: bindings::loff_t,
+ end: bindings::loff_t,
+ datasync: core::ffi::c_int,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ let start = start.try_into()?;
+ let end = end.try_into()?;
+ let datasync = datasync != 0;
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
+ // `release` callback, which the C API guarantees that will be called only when all
+ // references to `file` have been released, so we know it can't be called while this
+ // function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ let res = T::fsync(f, unsafe { File::from_ptr(file) }, start, end, datasync)?;
+ Ok(res.try_into().unwrap())
+ }
+ }
+
+ unsafe extern "C" fn poll_callback(
+ file: *mut bindings::file,
+ wait: *mut bindings::poll_table_struct,
+ ) -> bindings::__poll_t {
+ // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
+ // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the `release`
+ // callback, which the C API guarantees that will be called only when all references to
+ // `file` have been released, so we know it can't be called while this function is running.
+ let f = unsafe { T::Data::borrow((*file).private_data) };
+ match T::poll(f, unsafe { File::from_ptr(file) }, unsafe {
+ &PollTable::from_ptr(wait)
+ }) {
+ Ok(v) => v,
+ Err(_) => bindings::POLLERR,
+ }
+ }
+
+ const VTABLE: bindings::file_operations = bindings::file_operations {
+ open: Some(Self::open_callback),
+ release: Some(Self::release_callback),
+ read: if T::HAS_READ {
+ Some(Self::read_callback)
+ } else {
+ None
+ },
+ write: if T::HAS_WRITE {
+ Some(Self::write_callback)
+ } else {
+ None
+ },
+ llseek: if T::HAS_SEEK {
+ Some(Self::llseek_callback)
+ } else {
+ None
+ },
+
+ check_flags: None,
+ compat_ioctl: if T::HAS_COMPAT_IOCTL {
+ Some(Self::compat_ioctl_callback)
+ } else {
+ None
+ },
+ copy_file_range: None,
+ fallocate: None,
+ fadvise: None,
+ fasync: None,
+ flock: None,
+ flush: None,
+ fsync: if T::HAS_FSYNC {
+ Some(Self::fsync_callback)
+ } else {
+ None
+ },
+ get_unmapped_area: None,
+ iterate: None,
+ iterate_shared: None,
+ iopoll: None,
+ lock: None,
+ mmap: if T::HAS_MMAP {
+ Some(Self::mmap_callback)
+ } else {
+ None
+ },
+ mmap_supported_flags: 0,
+ owner: ptr::null_mut(),
+ poll: if T::HAS_POLL {
+ Some(Self::poll_callback)
+ } else {
+ None
+ },
+ read_iter: if T::HAS_READ {
+ Some(Self::read_iter_callback)
+ } else {
+ None
+ },
+ remap_file_range: None,
+ sendpage: None,
+ setlease: None,
+ show_fdinfo: None,
+ splice_read: None,
+ splice_write: None,
+ unlocked_ioctl: if T::HAS_IOCTL {
+ Some(Self::unlocked_ioctl_callback)
+ } else {
+ None
+ },
+ uring_cmd: None,
+ write_iter: if T::HAS_WRITE {
+ Some(Self::write_iter_callback)
+ } else {
+ None
+ },
+ };
+
+ /// Builds an instance of [`struct file_operations`].
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that the adapter is compatible with the way the device is registered.
+ pub(crate) const unsafe fn build() -> &'static bindings::file_operations {
+ &Self::VTABLE
+ }
+}
+
+/// Allows the handling of ioctls defined with the `_IO`, `_IOR`, `_IOW`, and `_IOWR` macros.
+///
+/// For each macro, there is a handler function that takes the appropriate types as arguments.
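+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch of a handler for a device that only supports a single
+ /// `_IO`-style command; `MyDevice` and `MY_RESET` are hypothetical:
+ ///
+ /// ```ignore
+ /// struct MyDevice;
+ ///
+ /// impl IoctlHandler for MyDevice {
+ /// type Target<'a> = &'a MyDevice;
+ ///
+ /// fn pure(_this: Self::Target<'_>, _file: &File, cmd: u32, _arg: usize) -> Result<i32> {
+ /// match cmd {
+ /// MY_RESET => Ok(0),
+ /// _ => Err(EINVAL),
+ /// }
+ /// }
+ /// }
+ /// ```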
+pub trait IoctlHandler: Sync {
+ /// The type of the first argument to each associated function.
+ type Target<'a>;
+
+ /// Handles ioctls defined with the `_IO` macro, that is, with no buffer as argument.
+ fn pure(_this: Self::Target<'_>, _file: &File, _cmd: u32, _arg: usize) -> Result<i32> {
+ Err(EINVAL)
+ }
+
+ /// Handles ioctls defined with the `_IOR` macro, that is, with an output buffer provided as
+ /// argument.
+ fn read(
+ _this: Self::Target<'_>,
+ _file: &File,
+ _cmd: u32,
+ _writer: &mut UserSlicePtrWriter,
+ ) -> Result<i32> {
+ Err(EINVAL)
+ }
+
+ /// Handles ioctls defined with the `_IOW` macro, that is, with an input buffer provided as
+ /// argument.
+ fn write(
+ _this: Self::Target<'_>,
+ _file: &File,
+ _cmd: u32,
+ _reader: &mut UserSlicePtrReader,
+ ) -> Result<i32> {
+ Err(EINVAL)
+ }
+
+ /// Handles ioctls defined with the `_IOWR` macro, that is, with a buffer for both input and
+ /// output provided as argument.
+ fn read_write(
+ _this: Self::Target<'_>,
+ _file: &File,
+ _cmd: u32,
+ _data: UserSlicePtr,
+ ) -> Result<i32> {
+ Err(EINVAL)
+ }
+}
+
+/// Represents an ioctl command.
+///
+/// It can use the components of an ioctl command to dispatch ioctls using
+/// [`IoctlCommand::dispatch`].
+pub struct IoctlCommand {
+ cmd: u32,
+ arg: usize,
+ user_slice: Option<UserSlicePtr>,
+}
+
+impl IoctlCommand {
+ /// Constructs a new [`IoctlCommand`].
+ fn new(cmd: u32, arg: usize) -> Self {
+ let size = (cmd >> bindings::_IOC_SIZESHIFT) & bindings::_IOC_SIZEMASK;
+
+ // SAFETY: We only create one instance of the user slice per ioctl call, so TOCTOU issues
+ // are not possible.
+ let user_slice = Some(unsafe { UserSlicePtr::new(arg as _, size as _) });
+ Self {
+ cmd,
+ arg,
+ user_slice,
+ }
+ }
+
+ /// Dispatches the given ioctl to the appropriate handler based on the value of the command. It
+ /// also creates a [`UserSlicePtr`], [`UserSlicePtrReader`], or [`UserSlicePtrWriter`]
+ /// depending on the direction of the buffer of the command.
+ ///
+ /// It is meant to be used in implementations of [`Operations::ioctl`] and
+ /// [`Operations::compat_ioctl`].
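+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch of an [`Operations::ioctl`] implementation that forwards to a
+ /// hypothetical `MyDevice` type implementing [`IoctlHandler`]:
+ ///
+ /// ```ignore
+ /// fn ioctl(data: &MyDevice, file: &File, cmd: &mut IoctlCommand) -> Result<i32> {
+ /// cmd.dispatch::<MyDevice>(data, file)
+ /// }
+ /// ```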
+ pub fn dispatch<T: IoctlHandler>(
+ &mut self,
+ handler: T::Target<'_>,
+ file: &File,
+ ) -> Result<i32> {
+ let dir = (self.cmd >> bindings::_IOC_DIRSHIFT) & bindings::_IOC_DIRMASK;
+ if dir == bindings::_IOC_NONE {
+ return T::pure(handler, file, self.cmd, self.arg);
+ }
+
+ let data = self.user_slice.take().ok_or(EINVAL)?;
+ const READ_WRITE: u32 = bindings::_IOC_READ | bindings::_IOC_WRITE;
+ match dir {
+ bindings::_IOC_WRITE => T::write(handler, file, self.cmd, &mut data.reader()),
+ bindings::_IOC_READ => T::read(handler, file, self.cmd, &mut data.writer()),
+ READ_WRITE => T::read_write(handler, file, self.cmd, data),
+ _ => Err(EINVAL),
+ }
+ }
+
+ /// Returns the raw 32-bit value of the command and the ptr-sized argument.
+ pub fn raw(&self) -> (u32, usize) {
+ (self.cmd, self.arg)
+ }
+}
+
+/// Trait for extracting file open arguments from kernel data structures.
+///
+/// This is meant to be implemented by registration managers.
+pub trait OpenAdapter<T: Sync> {
+ /// Converts untyped data stored in [`struct inode`] and [`struct file`] (when [`struct
+ /// file_operations::open`] is called) into the given type. For example, for `miscdev`
+ /// devices, a pointer to the registered [`struct miscdev`] is stored in [`struct
+ /// file::private_data`].
+ ///
+ /// # Safety
+ ///
+ /// This function must be called only when [`struct file_operations::open`] is being called for
+ /// a file that was registered by the implementer. The returned pointer must be valid and
+ /// non-null.
+ unsafe fn convert(_inode: *mut bindings::inode, _file: *mut bindings::file) -> *const T;
+}
+
+/// Corresponds to the kernel's `struct file_operations`.
+///
+/// You implement this trait whenever you would create a `struct file_operations`.
+///
+/// File descriptors may be used from multiple threads/processes concurrently, so your type must be
+/// [`Sync`]. It must also be [`Send`] because [`Operations::release`] will be called from the
+ /// thread that decrements the associated file's refcount to zero.
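+ ///
+ /// # Examples
+ ///
+ /// A minimal illustrative sketch: a hypothetical `MyFile` type with no per-file state that
+ /// relies on the provided defaults for everything but `open`:
+ ///
+ /// ```ignore
+ /// struct MyFile;
+ ///
+ /// #[vtable]
+ /// impl Operations for MyFile {
+ /// fn open(_context: &(), _file: &File) -> Result {
+ /// Ok(())
+ /// }
+ /// }
+ /// ```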
+#[vtable]
+pub trait Operations {
+ /// The type of the context data returned by [`Operations::open`] and made available to
+ /// other methods.
+ type Data: PointerWrapper + Send + Sync = ();
+
+ /// The type of the context data passed to [`Operations::open`].
+ type OpenData: Sync = ();
+
+ /// Creates a new instance of this file.
+ ///
+ /// Corresponds to the `open` function pointer in `struct file_operations`.
+ fn open(context: &Self::OpenData, file: &File) -> Result<Self::Data>;
+
+ /// Cleans up after the last reference to the file goes away.
+ ///
+ /// Note that context data is moved, so it will be freed automatically unless the
+ /// implementation moves it elsewhere.
+ ///
+ /// Corresponds to the `release` function pointer in `struct file_operations`.
+ fn release(_data: Self::Data, _file: &File) {}
+
+ /// Reads data from this file to the caller's buffer.
+ ///
+ /// Corresponds to the `read` and `read_iter` function pointers in `struct file_operations`.
+ fn read(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _file: &File,
+ _writer: &mut impl IoBufferWriter,
+ _offset: u64,
+ ) -> Result<usize> {
+ Err(EINVAL)
+ }
+
+ /// Writes data from the caller's buffer to this file.
+ ///
+ /// Corresponds to the `write` and `write_iter` function pointers in `struct file_operations`.
+ fn write(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _file: &File,
+ _reader: &mut impl IoBufferReader,
+ _offset: u64,
+ ) -> Result<usize> {
+ Err(EINVAL)
+ }
+
+ /// Changes the position of the file.
+ ///
+ /// Corresponds to the `llseek` function pointer in `struct file_operations`.
+ fn seek(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _file: &File,
+ _offset: SeekFrom,
+ ) -> Result<u64> {
+ Err(EINVAL)
+ }
+
+ /// Performs IO control operations that are specific to the file.
+ ///
+ /// Corresponds to the `unlocked_ioctl` function pointer in `struct file_operations`.
+ fn ioctl(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _file: &File,
+ _cmd: &mut IoctlCommand,
+ ) -> Result<i32> {
+ Err(ENOTTY)
+ }
+
+ /// Performs 32-bit IO control operations that are specific to the file on 64-bit kernels.
+ ///
+ /// Corresponds to the `compat_ioctl` function pointer in `struct file_operations`.
+ fn compat_ioctl(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _file: &File,
+ _cmd: &mut IoctlCommand,
+ ) -> Result<i32> {
+ Err(ENOTTY)
+ }
+
+ /// Syncs pending changes to this file.
+ ///
+ /// Corresponds to the `fsync` function pointer in `struct file_operations`.
+ fn fsync(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _file: &File,
+ _start: u64,
+ _end: u64,
+ _datasync: bool,
+ ) -> Result<u32> {
+ Err(EINVAL)
+ }
+
+ /// Maps areas of the caller's virtual memory with device/file memory.
+ ///
+ /// Corresponds to the `mmap` function pointer in `struct file_operations`.
+ fn mmap(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _file: &File,
+ _vma: &mut mm::virt::Area,
+ ) -> Result {
+ Err(EINVAL)
+ }
+
+ /// Checks the state of the file and optionally registers for notification when the state
+ /// changes.
+ ///
+ /// Corresponds to the `poll` function pointer in `struct file_operations`.
+ fn poll(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _file: &File,
+ _table: &PollTable,
+ ) -> Result<u32> {
+ Ok(bindings::POLLIN | bindings::POLLOUT | bindings::POLLRDNORM | bindings::POLLWRNORM)
+ }
+}
diff --git a/rust/kernel/fs.rs b/rust/kernel/fs.rs
new file mode 100644
index 000000000000..46dc38aad2bc
--- /dev/null
+++ b/rust/kernel/fs.rs
@@ -0,0 +1,846 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! File systems.
+//!
+//! C headers: [`include/linux/fs.h`](../../../../include/linux/fs.h)
+
+use crate::{
+ bindings, error::code::*, error::from_kernel_result, str::CStr, to_result,
+ types::PointerWrapper, AlwaysRefCounted, Error, Result, ScopeGuard, ThisModule,
+};
+use alloc::boxed::Box;
+use core::{
+ cell::UnsafeCell,
+ marker::{PhantomData, PhantomPinned},
+ pin::Pin,
+ ptr,
+};
+use macros::vtable;
+
+pub mod param;
+
+/// Type of superblock keying.
+///
+/// It determines how C's `fs_context_operations::get_tree` is implemented.
+pub enum Super {
+ /// Only one such superblock may exist.
+ Single,
+
+ /// As [`Super::Single`], but reconfigure if it exists.
+ SingleReconf,
+
+ /// Superblocks with different data pointers may exist.
+ Keyed,
+
+ /// Multiple independent superblocks may exist.
+ Independent,
+
+ /// Uses a block device.
+ BlockDev,
+}
+
+/// A file system context.
+///
+/// It is used to gather configuration to then mount or reconfigure a file system.
+#[vtable]
+pub trait Context<T: Type + ?Sized> {
+ /// Type of the data associated with the context.
+ type Data: PointerWrapper + Send + Sync + 'static;
+
+ /// The typed file system parameters.
+ ///
+ /// Users are encouraged to define it using the [`crate::define_fs_params`] macro.
+ const PARAMS: param::SpecTable<'static, Self::Data> = param::SpecTable::empty();
+
+ /// Creates a new context.
+ fn try_new() -> Result<Self::Data>;
+
+ /// Parses a parameter that wasn't specified in [`Self::PARAMS`].
+ fn parse_unknown_param(
+ _data: &mut Self::Data,
+ _name: &CStr,
+ _value: param::Value<'_>,
+ ) -> Result {
+ Err(ENOPARAM)
+ }
+
+ /// Parses the whole parameter block, potentially skipping regular handling for parts of it.
+ ///
+ /// The return value is the portion of the input buffer for which the regular handling
+ /// (involving [`Self::PARAMS`] and [`Self::parse_unknown_param`]) will still be carried out.
+ /// If it's `None`, the regular handling is not performed at all.
+ fn parse_monolithic<'a>(
+ _data: &mut Self::Data,
+ _buf: Option<&'a mut [u8]>,
+ ) -> Result<Option<&'a mut [u8]>> {
+ Ok(None)
+ }
+
+ /// Returns the superblock data to be used by this file system context.
+ ///
+ /// This is only needed when [`Type::SUPER_TYPE`] is [`Super::Keyed`], otherwise it is never
+ /// called. In the former case, when the fs is being mounted, an existing superblock is reused
+ /// if one can be found with the same data as the returned value; otherwise a new superblock is
+ /// created.
+ fn tree_key(_data: &mut Self::Data) -> Result<T::Data> {
+ Err(ENOTSUPP)
+ }
+}
+
+struct Tables<T: Type + ?Sized>(T);
+impl<T: Type + ?Sized> Tables<T> {
+ const CONTEXT: bindings::fs_context_operations = bindings::fs_context_operations {
+ free: Some(Self::free_callback),
+ parse_param: Some(Self::parse_param_callback),
+ get_tree: Some(Self::get_tree_callback),
+ reconfigure: Some(Self::reconfigure_callback),
+ parse_monolithic: if <T::Context as Context<T>>::HAS_PARSE_MONOLITHIC {
+ Some(Self::parse_monolithic_callback)
+ } else {
+ None
+ },
+ dup: None,
+ };
+
+ unsafe extern "C" fn free_callback(fc: *mut bindings::fs_context) {
+ // SAFETY: The callback contract guarantees that `fc` is valid.
+ let fc = unsafe { &*fc };
+
+ let ptr = fc.fs_private;
+ if !ptr.is_null() {
+ // SAFETY: `fs_private` was initialised with the result of an `into_pointer` call in
+ // `init_fs_context_callback`, so it's ok to call `from_pointer` here.
+ unsafe { <T::Context as Context<T>>::Data::from_pointer(ptr) };
+ }
+
+ let ptr = fc.s_fs_info;
+ if !ptr.is_null() {
+ // SAFETY: `s_fs_info` may be initialised with the result of an `into_pointer` call in
+ // `get_tree_callback` when keyed superblocks are used (`get_tree_keyed` sets it), so
+ // it's ok to call `from_pointer` here.
+ unsafe { T::Data::from_pointer(ptr) };
+ }
+ }
+
+ unsafe extern "C" fn parse_param_callback(
+ fc: *mut bindings::fs_context,
+ param: *mut bindings::fs_parameter,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The callback contract guarantees that `fc` is valid.
+ let ptr = unsafe { (*fc).fs_private };
+
+ // SAFETY: The value of `ptr` (coming from `fs_private`) was initialised in
+ // `init_fs_context_callback` to the result of an `into_pointer` call. Since the
+ // context is valid, `from_pointer` wasn't called yet, so `ptr` is valid. Additionally,
+ // the callback contract guarantees that callbacks are serialised, so it is ok to
+ // mutably reference it.
+ let mut data =
+ unsafe { <<T::Context as Context<T>>::Data as PointerWrapper>::borrow_mut(ptr) };
+ let mut result = bindings::fs_parse_result::default();
+ // SAFETY: All parameters are valid at least for the duration of the call.
+ let opt =
+ unsafe { bindings::fs_parse(fc, T::Context::PARAMS.first, param, &mut result) };
+
+ // SAFETY: The callback contract guarantees that `param` is valid for the duration of
+ // the callback.
+ let param = unsafe { &*param };
+ if opt >= 0 {
+ let opt = opt as usize;
+ if opt >= T::Context::PARAMS.handlers.len() {
+ return Err(EINVAL);
+ }
+ T::Context::PARAMS.handlers[opt].handle_param(&mut data, param, &result)?;
+ return Ok(0);
+ }
+
+ if opt != ENOPARAM.to_kernel_errno() {
+ return Err(Error::from_kernel_errno(opt));
+ }
+
+ if !T::Context::HAS_PARSE_UNKNOWN_PARAM {
+ return Err(ENOPARAM);
+ }
+
+ let val = param::Value::from_fs_parameter(param);
+ // SAFETY: The callback contract guarantees that the parameter key is valid and lasts at
+ // least for the duration of the callback.
+ T::Context::parse_unknown_param(
+ &mut data,
+ unsafe { CStr::from_char_ptr(param.key) },
+ val,
+ )?;
+ Ok(0)
+ }
+ }
+
+ unsafe extern "C" fn fill_super_callback(
+ sb_ptr: *mut bindings::super_block,
+ fc: *mut bindings::fs_context,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The callback contract guarantees that `fc` is valid. It also guarantees that
+ // the callbacks are serialised for a given `fc`, so it is safe to mutably dereference
+ // it.
+ let fc = unsafe { &mut *fc };
+ let ptr = core::mem::replace(&mut fc.fs_private, ptr::null_mut());
+
+ // SAFETY: The value of `ptr` (coming from `fs_private`) was initialised in
+ // `init_fs_context_callback` to the result of an `into_pointer` call. The context is
+ // being used to initialise a superblock, so we took over `ptr` (`fs_private` is set to
+ // null now) and call `from_pointer` below.
+ let data =
+ unsafe { <<T::Context as Context<T>>::Data as PointerWrapper>::from_pointer(ptr) };
+
+ // SAFETY: The callback contract guarantees that `sb_ptr` is a unique pointer to a
+ // newly-created superblock.
+ let newsb = unsafe { NewSuperBlock::new(sb_ptr) };
+ T::fill_super(data, newsb)?;
+ Ok(0)
+ }
+ }
+
+ unsafe extern "C" fn get_tree_callback(fc: *mut bindings::fs_context) -> core::ffi::c_int {
+ // N.B. When new types are added below, we may need to update `kill_sb_callback` to ensure
+ // that we're cleaning up properly.
+ match T::SUPER_TYPE {
+ Super::Single => unsafe {
+ // SAFETY: `fc` is valid per the callback contract. `fill_super_callback` also has
+ // the right type and is a valid callback.
+ bindings::get_tree_single(fc, Some(Self::fill_super_callback))
+ },
+ Super::SingleReconf => unsafe {
+ // SAFETY: `fc` is valid per the callback contract. `fill_super_callback` also has
+ // the right type and is a valid callback.
+ bindings::get_tree_single_reconf(fc, Some(Self::fill_super_callback))
+ },
+ Super::Independent => unsafe {
+ // SAFETY: `fc` is valid per the callback contract. `fill_super_callback` also has
+ // the right type and is a valid callback.
+ bindings::get_tree_nodev(fc, Some(Self::fill_super_callback))
+ },
+ Super::BlockDev => unsafe {
+ // SAFETY: `fc` is valid per the callback contract. `fill_super_callback` also has
+ // the right type and is a valid callback.
+ bindings::get_tree_bdev(fc, Some(Self::fill_super_callback))
+ },
+ Super::Keyed => {
+ from_kernel_result! {
+ // SAFETY: `fc` is valid per the callback contract.
+ let ctx = unsafe { &*fc };
+ let ptr = ctx.fs_private;
+
+ // SAFETY: The value of `ptr` (coming from `fs_private`) was initialised in
+ // `init_fs_context_callback` to the result of an `into_pointer` call. Since
+ // the context is valid, `from_pointer` wasn't called yet, so `ptr` is valid.
+ // Additionally, the callback contract guarantees that callbacks are
+ // serialised, so it is ok to mutably reference it.
+ let mut data = unsafe {
+ <<T::Context as Context<T>>::Data as PointerWrapper>::borrow_mut(ptr)
+ };
+ let fs_data = T::Context::tree_key(&mut data)?;
+ let fs_data_ptr = fs_data.into_pointer();
+
+ // `get_tree_keyed` reassigns `ctx.s_fs_info`, which should be ok because
+ // nowhere else is it assigned a non-null value. However, we add the assert
+ // below to ensure that there are no unexpected paths on the C side that may do
+ // this.
+ assert_eq!(ctx.s_fs_info, core::ptr::null_mut());
+
+ // SAFETY: `fc` is valid per the callback contract. `fill_super_callback` also
+ // has the right type and is a valid callback. Lastly, we just called
+ // `into_pointer` above, so `fs_data_ptr` is also valid.
+ to_result(unsafe {
+ bindings::get_tree_keyed(
+ fc,
+ Some(Self::fill_super_callback),
+ fs_data_ptr as _,
+ )
+ })?;
+ Ok(0)
+ }
+ }
+ }
+ }
+
+ unsafe extern "C" fn reconfigure_callback(_fc: *mut bindings::fs_context) -> core::ffi::c_int {
+ EINVAL.to_kernel_errno()
+ }
+
+ unsafe extern "C" fn parse_monolithic_callback(
+ fc: *mut bindings::fs_context,
+ buf: *mut core::ffi::c_void,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The callback contract guarantees that `fc` is valid.
+ let ptr = unsafe { (*fc).fs_private };
+
+ // SAFETY: The value of `ptr` (coming from `fs_private`) was initialised in
+ // `init_fs_context_callback` to the result of an `into_pointer` call. Since the
+ // context is valid, `from_pointer` wasn't called yet, so `ptr` is valid. Additionally,
+ // the callback contract guarantees that callbacks are serialised, so it is ok to
+ // mutably reference it.
+ let mut data =
+ unsafe { <<T::Context as Context<T>>::Data as PointerWrapper>::borrow_mut(ptr) };
+ let page = if buf.is_null() {
+ None
+ } else {
+ // SAFETY: This callback is called to handle the `mount` syscall, which takes a
+ // page-sized buffer as data.
+ Some(unsafe { &mut *ptr::slice_from_raw_parts_mut(buf.cast(), crate::PAGE_SIZE) })
+ };
+ let regular = T::Context::parse_monolithic(&mut data, page)?;
+ if let Some(buf) = regular {
+ // SAFETY: Both `fc` and `buf` are guaranteed to be valid; the former because the
+ // callback is still ongoing and the latter because its lifetime is tied to that of
+ // `page`, which is also valid for the duration of the callback.
+ to_result(unsafe {
+ bindings::generic_parse_monolithic(fc, buf.as_mut_ptr().cast())
+ })?;
+ }
+ Ok(0)
+ }
+ }
+
+ const SUPER_BLOCK: bindings::super_operations = bindings::super_operations {
+ alloc_inode: None,
+ destroy_inode: None,
+ free_inode: None,
+ dirty_inode: None,
+ write_inode: None,
+ drop_inode: None,
+ evict_inode: None,
+ put_super: None,
+ sync_fs: None,
+ freeze_super: None,
+ freeze_fs: None,
+ thaw_super: None,
+ unfreeze_fs: None,
+ statfs: None,
+ remount_fs: None,
+ umount_begin: None,
+ show_options: None,
+ show_devname: None,
+ show_path: None,
+ show_stats: None,
+ #[cfg(CONFIG_QUOTA)]
+ quota_read: None,
+ #[cfg(CONFIG_QUOTA)]
+ quota_write: None,
+ #[cfg(CONFIG_QUOTA)]
+ get_dquots: None,
+ nr_cached_objects: None,
+ free_cached_objects: None,
+ };
+}
+
+/// A file system type.
+pub trait Type {
+ /// The context used to build fs configuration before it is mounted or reconfigured.
+ type Context: Context<Self> + ?Sized;
+
+ /// Data associated with each file system instance.
+ type Data: PointerWrapper + Send + Sync = ();
+
+ /// Determines how superblocks for this file system type are keyed.
+ const SUPER_TYPE: Super;
+
+ /// The name of the file system type.
+ const NAME: &'static CStr;
+
+ /// The flags of this file system type.
+ ///
+ /// It is a combination of the flags in the [`flags`] module.
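+ ///
+ /// # Examples
+ ///
+ /// For instance (illustrative only), a block-device-backed file system that supports vfs
+ /// idmappings might use:
+ ///
+ /// ```ignore
+ /// const FLAGS: i32 = flags::REQUIRES_DEV | flags::ALLOW_IDMAP;
+ /// ```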
+ const FLAGS: i32;
+
+ /// Initialises a super block for this file system type.
+ fn fill_super(
+ data: <Self::Context as Context<Self>>::Data,
+ sb: NewSuperBlock<'_, Self>,
+ ) -> Result<&SuperBlock<Self>>;
+}
+
+/// File system flags.
+pub mod flags {
+ use crate::bindings;
+
+ /// The file system requires a device.
+ pub const REQUIRES_DEV: i32 = bindings::FS_REQUIRES_DEV as _;
+
+ /// The options provided when mounting are in binary form.
+ pub const BINARY_MOUNTDATA: i32 = bindings::FS_BINARY_MOUNTDATA as _;
+
+ /// The file system has a subtype. It is extracted from the name and passed in as a parameter.
+ pub const HAS_SUBTYPE: i32 = bindings::FS_HAS_SUBTYPE as _;
+
+ /// The file system can be mounted by userns root.
+ pub const USERNS_MOUNT: i32 = bindings::FS_USERNS_MOUNT as _;
+
+ /// Disables fanotify permission events.
+ pub const DISALLOW_NOTIFY_PERM: i32 = bindings::FS_DISALLOW_NOTIFY_PERM as _;
+
+ /// The file system has been updated to handle vfs idmappings.
+ pub const ALLOW_IDMAP: i32 = bindings::FS_ALLOW_IDMAP as _;
+
+ /// The file system will handle `d_move` during `rename` internally.
+ pub const RENAME_DOES_D_MOVE: i32 = bindings::FS_RENAME_DOES_D_MOVE as _;
+}
+
+/// A file system registration.
+#[derive(Default)]
+pub struct Registration {
+ is_registered: bool,
+ fs: UnsafeCell<bindings::file_system_type>,
+ _pin: PhantomPinned,
+}
+
+// SAFETY: `Registration` doesn't really provide any `&self` methods, so it is safe to pass
+// references to it around.
+unsafe impl Sync for Registration {}
+
+// SAFETY: Both registration and unregistration are implemented in C and safe to be performed from
+// any thread, so `Registration` is `Send`.
+unsafe impl Send for Registration {}
+
+impl Registration {
+ /// Creates a new file system registration.
+ ///
+ /// It is not visible or accessible yet. A successful call to [`Registration::register`] needs
+ /// to be made before users can mount it.
+ pub fn new() -> Self {
+ Self {
+ is_registered: false,
+ fs: UnsafeCell::new(bindings::file_system_type::default()),
+ _pin: PhantomPinned,
+ }
+ }
+
+ /// Registers a file system so that it can be mounted by users.
+ ///
+ /// The file system is described by the [`Type`] argument.
+ ///
+ /// It is automatically unregistered when the registration is dropped.
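+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch mirroring what [`Module`] does below, for a hypothetical `MyFs`
+ /// type implementing [`Type`] and a `module: &'static ThisModule` reference:
+ ///
+ /// ```ignore
+ /// let mut reg = Pin::from(Box::try_new(Registration::new())?);
+ /// reg.as_mut().register::<MyFs>(module)?;
+ /// ```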
+ pub fn register<T: Type + ?Sized>(self: Pin<&mut Self>, module: &'static ThisModule) -> Result {
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+
+ if this.is_registered {
+ return Err(EINVAL);
+ }
+
+ let mut fs = this.fs.get_mut();
+ fs.owner = module.0;
+ fs.name = T::NAME.as_char_ptr();
+ fs.fs_flags = T::FLAGS;
+ fs.parameters = T::Context::PARAMS.first;
+ fs.init_fs_context = Some(Self::init_fs_context_callback::<T>);
+ fs.kill_sb = Some(Self::kill_sb_callback::<T>);
+
+ // SAFETY: This block registers all fs type keys with lockdep. We just need the memory
+ // locations to be owned by the caller, which is the case.
+ unsafe {
+ bindings::lockdep_register_key(&mut fs.s_lock_key);
+ bindings::lockdep_register_key(&mut fs.s_umount_key);
+ bindings::lockdep_register_key(&mut fs.s_vfs_rename_key);
+ bindings::lockdep_register_key(&mut fs.i_lock_key);
+ bindings::lockdep_register_key(&mut fs.i_mutex_key);
+ bindings::lockdep_register_key(&mut fs.invalidate_lock_key);
+ bindings::lockdep_register_key(&mut fs.i_mutex_dir_key);
+ for key in &mut fs.s_writers_key {
+ bindings::lockdep_register_key(key);
+ }
+ }
+
+ let ptr = this.fs.get();
+
+ // SAFETY: `ptr` is valid as it points to `self.fs`.
+ let key_guard = ScopeGuard::new(|| unsafe { Self::unregister_keys(ptr) });
+
+ // SAFETY: Pointers stored in `fs` are either static or will live for as long as the
+ // registration is active (it is undone in `drop`).
+ to_result(unsafe { bindings::register_filesystem(ptr) })?;
+ key_guard.dismiss();
+ this.is_registered = true;
+ Ok(())
+ }
+
+ /// Unregisters the lockdep keys in the file system type.
+ ///
+ /// # Safety
+ ///
+ /// `fs` must be non-null and valid.
+ unsafe fn unregister_keys(fs: *mut bindings::file_system_type) {
+ // SAFETY: This block unregisters all fs type keys from lockdep. They must have been
+ // registered before.
+ unsafe {
+ bindings::lockdep_unregister_key(ptr::addr_of_mut!((*fs).s_lock_key));
+ bindings::lockdep_unregister_key(ptr::addr_of_mut!((*fs).s_umount_key));
+ bindings::lockdep_unregister_key(ptr::addr_of_mut!((*fs).s_vfs_rename_key));
+ bindings::lockdep_unregister_key(ptr::addr_of_mut!((*fs).i_lock_key));
+ bindings::lockdep_unregister_key(ptr::addr_of_mut!((*fs).i_mutex_key));
+ bindings::lockdep_unregister_key(ptr::addr_of_mut!((*fs).invalidate_lock_key));
+ bindings::lockdep_unregister_key(ptr::addr_of_mut!((*fs).i_mutex_dir_key));
+ for i in 0..(*fs).s_writers_key.len() {
+ bindings::lockdep_unregister_key(ptr::addr_of_mut!((*fs).s_writers_key[i]));
+ }
+ }
+ }
+
+ unsafe extern "C" fn init_fs_context_callback<T: Type + ?Sized>(
+ fc_ptr: *mut bindings::fs_context,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ let data = T::Context::try_new()?;
+ // SAFETY: The callback contract guarantees that `fc_ptr` is the only pointer to a
+ // newly-allocated fs context, so it is safe to mutably reference it.
+ let fc = unsafe { &mut *fc_ptr };
+ fc.fs_private = data.into_pointer() as _;
+ fc.ops = &Tables::<T>::CONTEXT;
+ Ok(0)
+ }
+ }
+
+ unsafe extern "C" fn kill_sb_callback<T: Type + ?Sized>(sb_ptr: *mut bindings::super_block) {
+ if let Super::BlockDev = T::SUPER_TYPE {
+ // SAFETY: When the superblock type is `BlockDev`, we have a block device so it's safe
+ // to call `kill_block_super`. Additionally, the callback contract guarantees that
+ // `sb_ptr` is valid.
+ unsafe { bindings::kill_block_super(sb_ptr) }
+ } else {
+ // SAFETY: We always call a `get_tree_nodev` variant from `get_tree_callback` without a
+ // device when `T::SUPER_TYPE` is not `BlockDev`, so we never have a device in such
+ // cases, therefore it is ok to call the function below. Additionally, the callback
+ // contract guarantees that `sb_ptr` is valid.
+ unsafe { bindings::kill_anon_super(sb_ptr) }
+ }
+
+ // SAFETY: The callback contract guarantees that `sb_ptr` is valid.
+ let sb = unsafe { &*sb_ptr };
+
+ // SAFETY: The `kill_sb` callback being called implies that the `s_type` field is valid.
+ unsafe { Self::unregister_keys(sb.s_type) };
+
+ let ptr = sb.s_fs_info;
+ if !ptr.is_null() {
+ // SAFETY: The only place where `s_fs_info` is assigned is `NewSuperBlock::init`, where
+ // it's initialised with the result of an `into_pointer` call. We checked above that
+ // `ptr` is non-null; it would be null only if we never reached the point where the
+ // field is initialised.
+ unsafe { T::Data::from_pointer(ptr) };
+ }
+ }
+}
+
+impl Drop for Registration {
+ fn drop(&mut self) {
+ if self.is_registered {
+ // SAFETY: When `is_registered` is `true`, a previous call to `register_filesystem` has
+ // succeeded, so it is safe to unregister here.
+ unsafe { bindings::unregister_filesystem(self.fs.get()) };
+ }
+ }
+}
+
+/// State of [`NewSuperBlock`] that indicates that [`NewSuperBlock::init`] needs to be called
+/// eventually.
+pub struct NeedsInit;
+
+/// State of [`NewSuperBlock`] that indicates that [`NewSuperBlock::init_root`] needs to be called
+/// eventually.
+pub struct NeedsRoot;
+
+/// Required superblock parameters.
+///
+/// This is used in [`NewSuperBlock::init`].
+pub struct SuperParams {
+ /// The magic number of the superblock.
+ pub magic: u32,
+
+ /// The size of a block in powers of 2 (i.e., for a value of `n`, the size is `2^n`).
+ pub blocksize_bits: u8,
+
+ /// Maximum size of a file.
+ pub maxbytes: i64,
+
+ /// Granularity of c/m/atime in ns (cannot be worse than a second).
+ pub time_gran: u32,
+}
+
+impl SuperParams {
+ /// Default value for instances of [`SuperParams`].
+ pub const DEFAULT: Self = Self {
+ magic: 0,
+ blocksize_bits: crate::PAGE_SIZE as _,
+ maxbytes: bindings::MAX_LFS_FILESIZE,
+ time_gran: 1,
+ };
+}
+
+/// A superblock that is still being initialised.
+///
+/// It uses type states to ensure that callers use the right sequence of calls.
+///
+/// # Invariants
+///
+/// The superblock is a newly-created one and this is the only active pointer to it.
+pub struct NewSuperBlock<'a, T: Type + ?Sized, S = NeedsInit> {
+ sb: *mut bindings::super_block,
+ _p: PhantomData<(&'a T, S)>,
+}
+
+impl<'a, T: Type + ?Sized> NewSuperBlock<'a, T, NeedsInit> {
+ /// Creates a new instance of [`NewSuperBlock`].
+ ///
+ /// # Safety
+ ///
+ /// `sb` must point to a newly-created superblock and it must be the only active pointer to it.
+ unsafe fn new(sb: *mut bindings::super_block) -> Self {
+ // INVARIANT: The invariants are satisfied by the safety requirements of this function.
+ Self {
+ sb,
+ _p: PhantomData,
+ }
+ }
+
+ /// Initialises the superblock so that it transitions to the [`NeedsRoot`] type state.
+ pub fn init(
+ self,
+ data: T::Data,
+ params: &SuperParams,
+ ) -> Result<NewSuperBlock<'a, T, NeedsRoot>> {
+ // SAFETY: The type invariant guarantees that `self.sb` is the only pointer to a
+ // newly-allocated superblock, so it is safe to mutably reference it.
+ let sb = unsafe { &mut *self.sb };
+
+ sb.s_magic = params.magic as _;
+ sb.s_op = &Tables::<T>::SUPER_BLOCK;
+ sb.s_maxbytes = params.maxbytes;
+ sb.s_time_gran = params.time_gran;
+ sb.s_blocksize_bits = params.blocksize_bits;
+ sb.s_blocksize = 1;
+ if sb.s_blocksize.leading_zeros() < params.blocksize_bits.into() {
+ return Err(EINVAL);
+ }
+ sb.s_blocksize = 1 << sb.s_blocksize_bits;
+
+ // Keyed file systems already have `s_fs_info` initialised.
+ let info = data.into_pointer() as *mut _;
+ if let Super::Keyed = T::SUPER_TYPE {
+ // SAFETY: We just called `into_pointer` above.
+ unsafe { T::Data::from_pointer(info) };
+
+ if sb.s_fs_info != info {
+ return Err(EINVAL);
+ }
+ } else {
+ sb.s_fs_info = info;
+ }
+
+ Ok(NewSuperBlock {
+ sb: self.sb,
+ _p: PhantomData,
+ })
+ }
+}
+
+impl<'a, T: Type + ?Sized> NewSuperBlock<'a, T, NeedsRoot> {
+ /// Initialises the root of the superblock.
+ pub fn init_root(self) -> Result<&'a SuperBlock<T>> {
+ // The following is temporary code to create the root inode and dentry. It will be replaced
+ // once we allow inodes and dentries to be created directly from Rust code.
+
+ // SAFETY: `sb` is initialised (`NeedsRoot` typestate implies it), so it is safe to pass it
+ // to `new_inode`.
+ let inode = unsafe { bindings::new_inode(self.sb) };
+ if inode.is_null() {
+ return Err(ENOMEM);
+ }
+
+ {
+ // SAFETY: This is a newly-created inode. No other references to it exist, so it is
+ // safe to mutably dereference it.
+ let inode = unsafe { &mut *inode };
+
+ // SAFETY: `current_time` requires that `inode.sb` be valid, which is the case here
+ // since we allocated the inode through the superblock.
+ let time = unsafe { bindings::current_time(inode) };
+ inode.i_ino = 1;
+ inode.i_mode = (bindings::S_IFDIR | 0o755) as _;
+ inode.i_mtime = time;
+ inode.i_atime = time;
+ inode.i_ctime = time;
+
+ // SAFETY: `simple_dir_operations` never changes, it's safe to reference it.
+ inode.__bindgen_anon_3.i_fop = unsafe { &bindings::simple_dir_operations };
+
+ // SAFETY: `simple_dir_inode_operations` never changes, it's safe to reference it.
+ inode.i_op = unsafe { &bindings::simple_dir_inode_operations };
+
+ // SAFETY: `inode` is valid for write.
+ unsafe { bindings::set_nlink(inode, 2) };
+ }
+
+ // SAFETY: `d_make_root` requires that `inode` be valid and referenced, which is the
+ // case for this call.
+ //
+ // It takes over the inode, even on failure, so we don't need to clean it up.
+ let dentry = unsafe { bindings::d_make_root(inode) };
+ if dentry.is_null() {
+ return Err(ENOMEM);
+ }
+
+ // SAFETY: The typestate guarantees that `self.sb` is valid.
+ unsafe { (*self.sb).s_root = dentry };
+
+ // SAFETY: The typestate guarantees that `self.sb` is initialised and we just finished
+ // setting its root, so it's a fully ready superblock.
+ Ok(unsafe { &mut *self.sb.cast() })
+ }
+}
+
+/// A file system super block.
+///
+/// Wraps the kernel's `struct super_block`.
+#[repr(transparent)]
+pub struct SuperBlock<T: Type + ?Sized>(
+ pub(crate) UnsafeCell<bindings::super_block>,
+ PhantomData<T>,
+);
+
+/// Wraps the kernel's `struct inode`.
+///
+/// # Invariants
+///
+/// Instances of this type are always ref-counted, that is, a call to `ihold` ensures that the
+/// allocation remains valid at least until the matching call to `iput`.
+#[repr(transparent)]
+pub struct INode(pub(crate) UnsafeCell<bindings::inode>);
+
+// SAFETY: The type invariants guarantee that `INode` is always ref-counted.
+unsafe impl AlwaysRefCounted for INode {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::ihold(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::iput(obj.cast().as_ptr()) }
+ }
+}
+
+/// Wraps the kernel's `struct dentry`.
+///
+/// # Invariants
+///
+/// Instances of this type are always ref-counted, that is, a call to `dget` ensures that the
+/// allocation remains valid at least until the matching call to `dput`.
+#[repr(transparent)]
+pub struct DEntry(pub(crate) UnsafeCell<bindings::dentry>);
+
+// SAFETY: The type invariants guarantee that `DEntry` is always ref-counted.
+unsafe impl AlwaysRefCounted for DEntry {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::dget(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::dput(obj.cast().as_ptr()) }
+ }
+}
+
+/// Wraps the kernel's `struct filename`.
+#[repr(transparent)]
+pub struct Filename(pub(crate) UnsafeCell<bindings::filename>);
+
+impl Filename {
+ /// Creates a reference to a [`Filename`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
+ /// returned [`Filename`] instance.
+ pub(crate) unsafe fn from_ptr<'a>(ptr: *const bindings::filename) -> &'a Filename {
+ // SAFETY: The safety requirements guarantee the validity of the dereference, while the
+ // `Filename` type being transparent makes the cast ok.
+ unsafe { &*ptr.cast() }
+ }
+}
+
+/// Kernel module that exposes a single file system implemented by `T`.
+pub struct Module<T: Type> {
+ _fs: Pin<Box<Registration>>,
+ _p: PhantomData<T>,
+}
+
+impl<T: Type + Sync> crate::Module for Module<T> {
+ fn init(_name: &'static CStr, module: &'static ThisModule) -> Result<Self> {
+ let mut reg = Pin::from(Box::try_new(Registration::new())?);
+ reg.as_mut().register::<T>(module)?;
+ Ok(Self {
+ _fs: reg,
+ _p: PhantomData,
+ })
+ }
+}
+
+/// Declares a kernel module that exposes a single file system.
+///
+/// The `type` argument must be a type which implements the [`Type`] trait. Also accepts various
+/// forms of kernel metadata.
+///
+/// # Examples
+///
+/// ```ignore
+/// use kernel::prelude::*;
+/// use kernel::{c_str, fs};
+///
+/// module_fs! {
+/// type: MyFs,
+/// name: b"my_fs_kernel_module",
+/// author: b"Rust for Linux Contributors",
+/// description: b"My very own file system kernel module!",
+/// license: b"GPL",
+/// }
+///
+/// struct MyFs;
+///
+/// #[vtable]
+/// impl fs::Context<Self> for MyFs {
+/// type Data = ();
+/// fn try_new() -> Result {
+/// Ok(())
+/// }
+/// }
+///
+/// impl fs::Type for MyFs {
+/// type Context = Self;
+/// const SUPER_TYPE: fs::Super = fs::Super::Independent;
+/// const NAME: &'static CStr = c_str!("example");
+/// const FLAGS: i32 = 0;
+///
+/// fn fill_super(_data: (), sb: fs::NewSuperBlock<'_, Self>) -> Result<&fs::SuperBlock<Self>> {
+/// let sb = sb.init(
+/// (),
+/// &fs::SuperParams {
+/// magic: 0x6578616d,
+/// ..fs::SuperParams::DEFAULT
+/// },
+/// )?;
+/// let sb = sb.init_root()?;
+/// Ok(sb)
+/// }
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_fs {
+ (type: $type:ty, $($f:tt)*) => {
+ type ModuleType = $crate::fs::Module<$type>;
+ $crate::macros::module! {
+ type: ModuleType,
+ $($f)*
+ }
+ }
+}
diff --git a/rust/kernel/fs/param.rs b/rust/kernel/fs/param.rs
new file mode 100644
index 000000000000..445cea404bcd
--- /dev/null
+++ b/rust/kernel/fs/param.rs
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! File system parameters and parsing them.
+//!
+//! C headers: [`include/linux/fs_parser.h`](../../../../../include/linux/fs_parser.h)
+
+use crate::{bindings, file, fs, str::CStr, Result};
+use core::{marker::PhantomData, ptr};
+
+/// The value of a file system parameter.
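+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch of matching a value inside an implementation of
+ /// `Context::parse_unknown_param`; the `enable_flag` and `set_string` helpers on the
+ /// context data are hypothetical:
+ ///
+ /// ```ignore
+ /// fn parse_unknown_param(data: &mut Self::Data, name: &CStr, value: Value<'_>) -> Result {
+ /// match value {
+ /// Value::Flag => data.enable_flag(name),
+ /// Value::String(s) => data.set_string(name, s),
+ /// _ => Err(ENOPARAM),
+ /// }
+ /// }
+ /// ```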
+pub enum Value<'a> {
+ /// The value is undefined.
+ Undefined,
+
+ /// There is no value, but the parameter itself is a flag.
+ Flag,
+
+ /// The value is the given string.
+ String(&'a CStr),
+
+ /// The value is the given binary blob.
+ Blob(&'a mut [u8]),
+
+ /// The value is the given file.
+ File(&'a file::File),
+
+ /// The value is the given filename and the given directory file descriptor (which may be
+ /// `AT_FDCWD`, to indicate the current directory).
+ Filename(&'a fs::Filename, i32),
+}
+
+impl<'a> Value<'a> {
+ pub(super) fn from_fs_parameter(p: &'a bindings::fs_parameter) -> Self {
+ match p.type_() {
+ bindings::fs_value_type_fs_value_is_string => {
+ // SAFETY: `type_` is string, so it is ok to use the union field. Additionally, it
+ // is guaranteed to be valid while `p` is valid.
+ Self::String(unsafe { CStr::from_char_ptr(p.__bindgen_anon_1.string) })
+ }
+ bindings::fs_value_type_fs_value_is_flag => Self::Flag,
+ bindings::fs_value_type_fs_value_is_blob => {
+ // SAFETY: `type_` is blob, so it is ok to use the union field and size.
+ // Additionally, it is guaranteed to be valid while `p` is valid.
+ let slice = unsafe {
+ &mut *ptr::slice_from_raw_parts_mut(p.__bindgen_anon_1.blob.cast(), p.size)
+ };
+ Self::Blob(slice)
+ }
+ bindings::fs_value_type_fs_value_is_file => {
+ // SAFETY: `type_` is file, so it is ok to use the union field. Additionally, it is
+ // guaranteed to be valid while `p` is valid.
+ let file_ptr = unsafe { p.__bindgen_anon_1.file };
+ if file_ptr.is_null() {
+ Self::Undefined
+ } else {
+ // SAFETY: `file_ptr` is non-null and guaranteed to be valid while `p` is.
+ Self::File(unsafe { file::File::from_ptr(file_ptr) })
+ }
+ }
+ bindings::fs_value_type_fs_value_is_filename => {
+ // SAFETY: `type_` is filename, so it is ok to use the union field. Additionally,
+ // it is guaranteed to be valid while `p` is valid.
+ let filename_ptr = unsafe { p.__bindgen_anon_1.name };
+ if filename_ptr.is_null() {
+ Self::Undefined
+ } else {
+ // SAFETY: `filename_ptr` is non-null and guaranteed to be valid while `p` is.
+ Self::Filename(unsafe { fs::Filename::from_ptr(filename_ptr) }, p.dirfd)
+ }
+ }
+ _ => Self::Undefined,
+ }
+ }
+}
+
+/// A specification of a file system parameter.
+pub struct Spec {
+ name: &'static CStr,
+ flags: u16,
+ type_: bindings::fs_param_type,
+ extra: *const core::ffi::c_void,
+}
+
+const DEFAULT: Spec = Spec {
+ name: crate::c_str!(""),
+ flags: 0,
+ type_: None,
+ extra: core::ptr::null(),
+};
+
+macro_rules! define_param_type {
+ ($name:ident, $fntype:ty, $spec:expr, |$param:ident, $result:ident| $value:expr) => {
+ /// Module to support `$name` parameter types.
+ pub mod $name {
+ use super::*;
+
+ #[doc(hidden)]
+ pub const fn spec(name: &'static CStr) -> Spec {
+ const GIVEN: Spec = $spec;
+ Spec { name, ..GIVEN }
+ }
+
+ #[doc(hidden)]
+ pub const fn handler<S>(setfn: fn(&mut S, $fntype) -> Result) -> impl Handler<S> {
+ let c =
+ move |s: &mut S,
+ $param: &bindings::fs_parameter,
+ $result: &bindings::fs_parse_result| { setfn(s, $value) };
+ ConcreteHandler {
+ setfn: c,
+ _p: PhantomData,
+ }
+ }
+ }
+ };
+}
+
+// SAFETY: This is only called when the parse result is a boolean, so it is ok to access the
+// union field.
+define_param_type!(flag, bool, Spec { ..DEFAULT }, |_p, r| unsafe {
+ r.__bindgen_anon_1.boolean
+});
+
+define_param_type!(
+ flag_no,
+ bool,
+ Spec {
+ flags: bindings::fs_param_neg_with_no as _,
+ ..DEFAULT
+ },
+ // SAFETY: This is only called when the parse result is a boolean, so it is ok to access the
+ // union field.
+ |_p, r| unsafe { r.__bindgen_anon_1.boolean }
+);
+
+define_param_type!(
+ bool,
+ bool,
+ Spec {
+ type_: Some(bindings::fs_param_is_bool),
+ ..DEFAULT
+ },
+ // SAFETY: This is only called when the parse result is a boolean, so it is ok to access the
+ // union field.
+ |_p, r| unsafe { r.__bindgen_anon_1.boolean }
+);
+
+define_param_type!(
+ u32,
+ u32,
+ Spec {
+ type_: Some(bindings::fs_param_is_u32),
+ ..DEFAULT
+ },
+ // SAFETY: This is only called when the parse result is a u32, so it is ok to access the
+ // union field.
+ |_p, r| unsafe { r.__bindgen_anon_1.uint_32 }
+);
+
+define_param_type!(
+ u32oct,
+ u32,
+ Spec {
+ type_: Some(bindings::fs_param_is_u32),
+ extra: 8 as _,
+ ..DEFAULT
+ },
+ // SAFETY: This is only called when the parse result is a u32, so it is ok to access the
+ // union field.
+ |_p, r| unsafe { r.__bindgen_anon_1.uint_32 }
+);
+
+define_param_type!(
+ u32hex,
+ u32,
+ Spec {
+ type_: Some(bindings::fs_param_is_u32),
+ extra: 16 as _,
+ ..DEFAULT
+ },
+ // SAFETY: This is only called when the parse result is a u32, so it is ok to access the
+ // union field.
+ |_p, r| unsafe { r.__bindgen_anon_1.uint_32 }
+);
+
+define_param_type!(
+ s32,
+ i32,
+ Spec {
+ type_: Some(bindings::fs_param_is_s32),
+ ..DEFAULT
+ },
+ // SAFETY: This is only called when the parse result is an i32, so it is ok to access the
+ // union field.
+ |_p, r| unsafe { r.__bindgen_anon_1.int_32 }
+);
+
+define_param_type!(
+ u64,
+ u64,
+ Spec {
+ type_: Some(bindings::fs_param_is_u64),
+ extra: 16 as _,
+ ..DEFAULT
+ },
+ // SAFETY: This is only called when the parse result is a u64, so it is ok to access the
+ // union field.
+ |_p, r| unsafe { r.__bindgen_anon_1.uint_64 }
+);
+
+define_param_type!(
+ string,
+ &CStr,
+ Spec {
+ type_: Some(bindings::fs_param_is_string),
+ ..DEFAULT
+ },
+ // SAFETY: This is only called when the parse result is a string, so it is ok to access the
+ // union field.
+ |p, _r| unsafe { CStr::from_char_ptr(p.__bindgen_anon_1.string) }
+);
+
+/// Module to support `enum` parameter types.
+pub mod enum_ {
+ use super::*;
+
+ #[doc(hidden)]
+ pub const fn spec(name: &'static CStr, options: ConstantTable<'static>) -> Spec {
+ Spec {
+ name,
+ type_: Some(bindings::fs_param_is_enum),
+ extra: options.first as *const _ as _,
+ ..DEFAULT
+ }
+ }
+
+ #[doc(hidden)]
+ pub const fn handler<S>(setfn: fn(&mut S, u32) -> Result) -> impl Handler<S> {
+ let c = move |s: &mut S, _p: &bindings::fs_parameter, r: &bindings::fs_parse_result| {
+ // SAFETY: This is only called when the parse result is an enum, so it is ok to access the
+ // union field.
+ setfn(s, unsafe { r.__bindgen_anon_1.uint_32 })
+ };
+ ConcreteHandler {
+ setfn: c,
+ _p: PhantomData,
+ }
+ }
+}
+
+const ZERO_SPEC: bindings::fs_parameter_spec = bindings::fs_parameter_spec {
+ name: core::ptr::null(),
+ type_: None,
+ opt: 0,
+ flags: 0,
+ data: core::ptr::null(),
+};
+
+/// A zero-terminated parameter spec array, followed by handlers.
+#[repr(C)]
+pub struct SpecArray<const N: usize, S: 'static> {
+ specs: [bindings::fs_parameter_spec; N],
+ sentinel: bindings::fs_parameter_spec,
+ handlers: [&'static dyn Handler<S>; N],
+}
+
+impl<const N: usize, S: 'static> SpecArray<N, S> {
+ /// Creates a new spec array.
+ ///
+ /// Users are encouraged to use the [`define_fs_params`] macro to define the
+ /// [`super::Context::PARAMS`] constant.
+ ///
+ /// # Safety
+ ///
+ /// The type of the elements in `handlers` must be compatible with the types in `specs`. For
+ /// example, if `specs` declares that the i-th element is a bool, then the i-th handler
+ /// should be for a bool.
+ pub const unsafe fn new(specs: [Spec; N], handlers: [&'static dyn Handler<S>; N]) -> Self {
+ let mut array = Self {
+ specs: [ZERO_SPEC; N],
+ sentinel: ZERO_SPEC,
+ handlers,
+ };
+ let mut i = 0usize;
+ while i < N {
+ array.specs[i] = bindings::fs_parameter_spec {
+ name: specs[i].name.as_char_ptr(),
+ type_: specs[i].type_,
+ opt: i as _,
+ flags: specs[i].flags,
+ data: specs[i].extra,
+ };
+ i += 1;
+ }
+ array
+ }
+
+ /// Returns a [`SpecTable`] backed by `self`.
+ ///
+ /// This is used to essentially erase the array size.
+ pub const fn as_table(&self) -> SpecTable<'_, S> {
+ SpecTable {
+ first: &self.specs[0],
+ handlers: &self.handlers,
+ _p: PhantomData,
+ }
+ }
+}
+
+/// A parameter spec table.
+///
+/// The table is guaranteed to be zero-terminated.
+///
+/// Users are encouraged to use the [`define_fs_params`] macro to define the
+/// [`super::Context::PARAMS`] constant.
+pub struct SpecTable<'a, S: 'static> {
+ pub(super) first: &'a bindings::fs_parameter_spec,
+ pub(super) handlers: &'a [&'static dyn Handler<S>],
+ _p: PhantomData<S>,
+}
+
+impl<S> SpecTable<'static, S> {
+ pub(super) const fn empty() -> Self {
+ Self {
+ first: &ZERO_SPEC,
+ handlers: &[],
+ _p: PhantomData,
+ }
+ }
+}
+
+/// A zero-terminated parameter constant array.
+#[repr(C)]
+pub struct ConstantArray<const N: usize> {
+ consts: [bindings::constant_table; N],
+ sentinel: bindings::constant_table,
+}
+
+impl<const N: usize> ConstantArray<N> {
+ /// Creates a new constant array.
+ ///
+ /// Users are encouraged to use the [`define_fs_params`] macro to define the
+ /// [`super::Context::PARAMS`] constant.
+ pub const fn new(consts: [(&'static CStr, u32); N]) -> Self {
+ const ZERO: bindings::constant_table = bindings::constant_table {
+ name: core::ptr::null(),
+ value: 0,
+ };
+ let mut array = Self {
+ consts: [ZERO; N],
+ sentinel: ZERO,
+ };
+ let mut i = 0usize;
+ while i < N {
+ array.consts[i] = bindings::constant_table {
+ name: consts[i].0.as_char_ptr(),
+ value: consts[i].1 as _,
+ };
+ i += 1;
+ }
+ array
+ }
+
+ /// Returns a [`ConstantTable`] backed by `self`.
+ ///
+ /// This is used to essentially erase the array size.
+ pub const fn as_table(&self) -> ConstantTable<'_> {
+ ConstantTable {
+ first: &self.consts[0],
+ }
+ }
+}
+
+/// A parameter constant table.
+///
+/// The table is guaranteed to be zero-terminated.
+pub struct ConstantTable<'a> {
+ pub(super) first: &'a bindings::constant_table,
+}
+
+#[doc(hidden)]
+pub trait Handler<S> {
+ fn handle_param(
+ &self,
+ state: &mut S,
+ p: &bindings::fs_parameter,
+ r: &bindings::fs_parse_result,
+ ) -> Result;
+}
+
+struct ConcreteHandler<
+ S,
+ T: Fn(&mut S, &bindings::fs_parameter, &bindings::fs_parse_result) -> Result,
+> {
+ setfn: T,
+ _p: PhantomData<S>,
+}
+
+impl<S, T: Fn(&mut S, &bindings::fs_parameter, &bindings::fs_parse_result) -> Result> Handler<S>
+ for ConcreteHandler<S, T>
+{
+ fn handle_param(
+ &self,
+ state: &mut S,
+ p: &bindings::fs_parameter,
+ r: &bindings::fs_parse_result,
+ ) -> Result {
+ (self.setfn)(state, p, r)
+ }
+}
+
+/// Counts the number of comma-separated entries surrounded by braces.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::count_brace_items;
+///
+/// assert_eq!(0, count_brace_items!());
+/// assert_eq!(1, count_brace_items!({ A }));
+/// assert_eq!(1, count_brace_items!({ A },));
+/// assert_eq!(2, count_brace_items!({ A }, { B }));
+/// assert_eq!(2, count_brace_items!({ A }, { B },));
+/// assert_eq!(3, count_brace_items!({ A }, { B }, { C }));
+/// assert_eq!(3, count_brace_items!({ A }, { B }, { C },));
+/// ```
+#[macro_export]
+macro_rules! count_brace_items {
+ ({$($item:tt)*}, $($remaining:tt)*) => { 1 + $crate::count_brace_items!($($remaining)*) };
+ ({$($item:tt)*}) => { 1 };
+ () => { 0 };
+}
+
+/// Defines the file system parameters of a given file system context.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::{c_str, fs, str::CString};
+///
+/// #[derive(Default)]
+/// struct State {
+/// flag: Option<bool>,
+/// flag_no: Option<bool>,
+/// bool_value: Option<bool>,
+/// u32_value: Option<u32>,
+/// i32_value: Option<i32>,
+/// u64_value: Option<u64>,
+/// str_value: Option<CString>,
+/// enum_value: Option<u32>,
+/// }
+///
+/// fn set_u32(s: &mut Box<State>, v: u32) -> Result {
+/// s.u32_value = Some(v);
+/// Ok(())
+/// }
+///
+/// struct Example;
+///
+/// #[vtable]
+/// impl fs::Context<Self> for Example {
+/// type Data = Box<State>;
+///
+/// kernel::define_fs_params! {Box<State>,
+/// {flag, "flag", |s, v| { s.flag = Some(v); Ok(()) } },
+/// {flag_no, "flagno", |s, v| { s.flag_no = Some(v); Ok(()) } },
+/// {bool, "bool", |s, v| { s.bool_value = Some(v); Ok(()) } },
+/// {u32, "u32", set_u32 },
+/// {u32oct, "u32oct", set_u32 },
+/// {u32hex, "u32hex", set_u32 },
+/// {s32, "s32", |s, v| { s.i32_value = Some(v); Ok(()) } },
+/// {u64, "u64", |s, v| { s.u64_value = Some(v); Ok(()) } },
+/// {string, "string", |s, v| {
+/// s.str_value = Some(CString::try_from_fmt(fmt!("{v}"))?);
+/// Ok(())
+/// }},
+/// {enum, "enum", [("first", 10), ("second", 20)], |s, v| {
+/// s.enum_value = Some(v);
+/// Ok(())
+/// }},
+/// }
+///
+/// fn try_new() -> Result<Self::Data> {
+/// Ok(Box::try_new(State::default())?)
+/// }
+/// }
+///
+/// # impl fs::Type for Example {
+/// # type Context = Self;
+/// # const SUPER_TYPE: fs::Super = fs::Super::Independent;
+/// # const NAME: &'static CStr = c_str!("example");
+/// # const FLAGS: i32 = 0;
+/// #
+/// # fn fill_super<'a>(
+/// # _data: Box<State>,
+/// # sb: fs::NewSuperBlock<'_, Self>,
+/// # ) -> Result<&fs::SuperBlock<Self>> {
+/// # let sb = sb.init(
+/// # (),
+/// # &fs::SuperParams {
+/// # magic: 0x6578616d,
+/// # ..fs::SuperParams::DEFAULT
+/// # },
+/// # )?;
+/// # let sb = sb.init_root()?;
+/// # Ok(sb)
+/// # }
+/// # }
+/// ```
+#[macro_export]
+macro_rules! define_fs_params {
+ ($data_type:ty, $({$($t:tt)*}),+ $(,)?) => {
+ const PARAMS: $crate::fs::param::SpecTable<'static, $data_type> =
+ {
+ use $crate::fs::param::{self, ConstantArray, Spec, SpecArray, Handler};
+ use $crate::c_str;
+ const COUNT: usize = $crate::count_brace_items!($({$($t)*},)*);
+ const SPECS: [Spec; COUNT] = $crate::define_fs_params!(@specs $({$($t)*},)*);
+ const HANDLERS: [&dyn Handler<$data_type>; COUNT] =
+ $crate::define_fs_params!(@handlers $data_type, $({$($t)*},)*);
+ // SAFETY: We defined matching specs and handlers above.
+ const ARRAY: SpecArray<COUNT, $data_type> =
+ unsafe { SpecArray::new(SPECS, HANDLERS) };
+ ARRAY.as_table()
+ };
+ };
+
+ (@handlers $data_type:ty, $({$($t:tt)*},)*) => {
+ [ $($crate::define_fs_params!(@handler $data_type, $($t)*),)* ]
+ };
+ (@handler $data_type:ty, enum, $name:expr, $opts:expr, $closure:expr) => {
+ &param::enum_::handler::<$data_type>($closure)
+ };
+ (@handler $data_type:ty, $type:ident, $name:expr, $closure:expr) => {
+ &param::$type::handler::<$data_type>($closure)
+ };
+
+ (@specs $({$($t:tt)*},)*) => {[ $($crate::define_fs_params!(@spec $($t)*),)* ]};
+ (@spec enum, $name:expr, [$($opts:tt)*], $closure:expr) => {
+ {
+ const COUNT: usize = $crate::count_paren_items!($($opts)*);
+ const OPTIONS: ConstantArray<COUNT> =
+ ConstantArray::new($crate::define_fs_params!(@c_str_first $($opts)*));
+ param::enum_::spec(c_str!($name), OPTIONS.as_table())
+ }
+ };
+ (@spec $type:ident, $name:expr, $closure:expr) => { param::$type::spec(c_str!($name)) };
+
+ (@c_str_first $(($first:expr, $second:expr)),+ $(,)?) => {
+ [$((c_str!($first), $second),)*]
+ };
+}
diff --git a/rust/kernel/gpio.rs b/rust/kernel/gpio.rs
new file mode 100644
index 000000000000..53c7b398d10b
--- /dev/null
+++ b/rust/kernel/gpio.rs
@@ -0,0 +1,505 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Support for gpio device drivers.
+//!
+//! C header: [`include/linux/gpio/driver.h`](../../../../include/linux/gpio/driver.h)
+
+use crate::{
+ bindings, device, error::code::*, error::from_kernel_result, sync::LockClassKey,
+ types::PointerWrapper, Error, Result,
+};
+use core::{
+ cell::UnsafeCell,
+ marker::{PhantomData, PhantomPinned},
+ pin::Pin,
+};
+use macros::vtable;
+
+#[cfg(CONFIG_GPIOLIB_IRQCHIP)]
+pub use irqchip::{ChipWithIrqChip, RegistrationWithIrqChip};
+
+/// The direction of a gpio line.
+pub enum LineDirection {
+ /// Direction is input.
+ In = bindings::GPIO_LINE_DIRECTION_IN as _,
+
+ /// Direction is output.
+ Out = bindings::GPIO_LINE_DIRECTION_OUT as _,
+}
+
+/// A gpio chip.
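+///
+/// # Examples
+///
+/// The following is a minimal sketch of a chip implementation; `ZeroChip` is an illustrative
+/// type (not part of this patch) that reports every line as an input reading back zero.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::{gpio, types::PointerWrapper};
+///
+/// struct ZeroChip;
+///
+/// #[vtable]
+/// impl gpio::Chip for ZeroChip {
+///     type Data = ();
+///
+///     fn get_direction(
+///         _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+///         _offset: u32,
+///     ) -> Result<gpio::LineDirection> {
+///         Ok(gpio::LineDirection::In)
+///     }
+///
+///     fn get(
+///         _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+///         _offset: u32,
+///     ) -> Result<bool> {
+///         Ok(false)
+///     }
+/// }
+/// ```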
+#[vtable]
+pub trait Chip {
+ /// Context data associated with the gpio chip.
+ ///
+ /// It determines the type of the context data passed to each of the methods of the trait.
+ type Data: PointerWrapper + Sync + Send;
+
+ /// Returns the direction of the given gpio line.
+ fn get_direction(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _offset: u32,
+ ) -> Result<LineDirection> {
+ Err(ENOTSUPP)
+ }
+
+ /// Configures the direction as input of the given gpio line.
+ fn direction_input(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _offset: u32,
+ ) -> Result {
+ Err(EIO)
+ }
+
+ /// Configures the direction as output of the given gpio line.
+ ///
+ /// The value that will be initially output is also specified.
+ fn direction_output(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _offset: u32,
+ _value: bool,
+ ) -> Result {
+ Err(ENOTSUPP)
+ }
+
+ /// Returns the current value of the given gpio line.
+ fn get(_data: <Self::Data as PointerWrapper>::Borrowed<'_>, _offset: u32) -> Result<bool> {
+ Err(EIO)
+ }
+
+ /// Sets the value of the given gpio line.
+ fn set(_data: <Self::Data as PointerWrapper>::Borrowed<'_>, _offset: u32, _value: bool) {}
+}
+
+/// A registration of a gpio chip.
+///
+/// # Examples
+///
+/// The following example registers an empty gpio chip.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::{
+/// device::RawDevice,
+/// gpio::{self, Registration},
+/// };
+///
+/// struct MyGpioChip;
+/// #[vtable]
+/// impl gpio::Chip for MyGpioChip {
+/// type Data = ();
+/// }
+///
+/// fn example(parent: &dyn RawDevice) -> Result<Pin<Box<Registration<MyGpioChip>>>> {
+/// let mut r = Pin::from(Box::try_new(Registration::new())?);
+/// kernel::gpio_chip_register!(r.as_mut(), 32, None, parent, ())?;
+/// Ok(r)
+/// }
+/// ```
+pub struct Registration<T: Chip> {
+ gc: UnsafeCell<bindings::gpio_chip>,
+ parent: Option<device::Device>,
+ _p: PhantomData<T>,
+ _pin: PhantomPinned,
+}
+
+impl<T: Chip> Registration<T> {
+ /// Creates a new [`Registration`] but does not register it yet.
+ ///
+ /// It is allowed to move.
+ pub fn new() -> Self {
+ Self {
+ parent: None,
+ gc: UnsafeCell::new(bindings::gpio_chip::default()),
+ _pin: PhantomPinned,
+ _p: PhantomData,
+ }
+ }
+
+ /// Registers a gpio chip with the rest of the kernel.
+ ///
+ /// Users are encouraged to use the [`gpio_chip_register`] macro because it automatically
+ /// defines the lock classes and calls the registration function.
+ pub fn register(
+ self: Pin<&mut Self>,
+ gpio_count: u16,
+ base: Option<i32>,
+ parent: &dyn device::RawDevice,
+ data: T::Data,
+ lock_keys: [&'static LockClassKey; 2],
+ ) -> Result {
+ if self.parent.is_some() {
+ // Already registered.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ {
+ let gc = this.gc.get_mut();
+
+ // Set up the callbacks.
+ gc.request = Some(bindings::gpiochip_generic_request);
+ gc.free = Some(bindings::gpiochip_generic_free);
+ if T::HAS_GET_DIRECTION {
+ gc.get_direction = Some(get_direction_callback::<T>);
+ }
+ if T::HAS_DIRECTION_INPUT {
+ gc.direction_input = Some(direction_input_callback::<T>);
+ }
+ if T::HAS_DIRECTION_OUTPUT {
+ gc.direction_output = Some(direction_output_callback::<T>);
+ }
+ if T::HAS_GET {
+ gc.get = Some(get_callback::<T>);
+ }
+ if T::HAS_SET {
+ gc.set = Some(set_callback::<T>);
+ }
+
+ // When a base is not explicitly given, use -1 for one to be picked.
+ if let Some(b) = base {
+ gc.base = b;
+ } else {
+ gc.base = -1;
+ }
+
+ gc.ngpio = gpio_count;
+ gc.parent = parent.raw_device();
+ gc.label = parent.name().as_char_ptr();
+
+ // TODO: Define `gc.owner` as well.
+ }
+
+ let data_pointer = <T::Data as PointerWrapper>::into_pointer(data);
+ // SAFETY: `gc` was initialised above, so it is valid.
+ let ret = unsafe {
+ bindings::gpiochip_add_data_with_key(
+ this.gc.get(),
+ data_pointer as _,
+ lock_keys[0].get(),
+ lock_keys[1].get(),
+ )
+ };
+ if ret < 0 {
+ // SAFETY: `data_pointer` was returned by `into_pointer` above.
+ unsafe { T::Data::from_pointer(data_pointer) };
+ return Err(Error::from_kernel_errno(ret));
+ }
+
+ this.parent = Some(device::Device::from_dev(parent));
+ Ok(())
+ }
+}
+
+// SAFETY: `Registration` doesn't offer any methods or access to fields when shared between threads
+// or CPUs, so it is safe to share it.
+unsafe impl<T: Chip> Sync for Registration<T> {}
+
+// SAFETY: Registration with and unregistration from the gpio subsystem can happen from any thread.
+// Additionally, `T::Data` (which is dropped during unregistration) is `Send`, so it is ok to move
+// `Registration` to different threads.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: Chip> Send for Registration<T> {}
+
+impl<T: Chip> Default for Registration<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T: Chip> Drop for Registration<T> {
+ /// Removes the registration from the kernel if it has completed successfully before.
+ fn drop(&mut self) {
+ if self.parent.is_some() {
+ // Get a pointer to the data stored in chip before destroying it.
+ // SAFETY: `gc` was initialised during registration, which is guaranteed to have succeeded
+ // (because `parent` is `Some(_)`), so it remains valid.
+ let data_pointer = unsafe { bindings::gpiochip_get_data(self.gc.get()) };
+
+ // SAFETY: By the same argument above, `gc` is still valid.
+ unsafe { bindings::gpiochip_remove(self.gc.get()) };
+
+ // Free data as well.
+ // SAFETY: `data_pointer` was returned by `into_pointer` during registration.
+ unsafe { <T::Data as PointerWrapper>::from_pointer(data_pointer) };
+ }
+ }
+}
+
+/// Registers a gpio chip with the rest of the kernel.
+///
+/// It automatically defines the required lock classes.
+#[macro_export]
+macro_rules! gpio_chip_register {
+ ($reg:expr, $count:expr, $base:expr, $parent:expr, $data:expr $(,)?) => {{
+ static CLASS1: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ static CLASS2: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ $crate::gpio::Registration::register(
+ $reg,
+ $count,
+ $base,
+ $parent,
+ $data,
+ [&CLASS1, &CLASS2],
+ )
+ }};
+}
+
+unsafe extern "C" fn get_direction_callback<T: Chip>(
+ gc: *mut bindings::gpio_chip,
+ offset: core::ffi::c_uint,
+) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
+ Ok(T::get_direction(data, offset)? as i32)
+ }
+}
+
+unsafe extern "C" fn direction_input_callback<T: Chip>(
+ gc: *mut bindings::gpio_chip,
+ offset: core::ffi::c_uint,
+) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
+ T::direction_input(data, offset)?;
+ Ok(0)
+ }
+}
+
+unsafe extern "C" fn direction_output_callback<T: Chip>(
+ gc: *mut bindings::gpio_chip,
+ offset: core::ffi::c_uint,
+ value: core::ffi::c_int,
+) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
+ T::direction_output(data, offset, value != 0)?;
+ Ok(0)
+ }
+}
+
+unsafe extern "C" fn get_callback<T: Chip>(
+ gc: *mut bindings::gpio_chip,
+ offset: core::ffi::c_uint,
+) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
+ let v = T::get(data, offset)?;
+ Ok(v as _)
+ }
+}
+
+unsafe extern "C" fn set_callback<T: Chip>(
+ gc: *mut bindings::gpio_chip,
+ offset: core::ffi::c_uint,
+ value: core::ffi::c_int,
+) {
+ // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
+ T::set(data, offset, value != 0);
+}
+
+#[cfg(CONFIG_GPIOLIB_IRQCHIP)]
+mod irqchip {
+ use super::*;
+ use crate::irq;
+
+ /// A gpio chip that includes an irq chip.
+ pub trait ChipWithIrqChip: Chip {
+ /// Implements the irq flow for the gpio chip.
+ fn handle_irq_flow(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _desc: &irq::Descriptor,
+ _domain: &irq::Domain,
+ );
+ }
+
+ /// A registration of a gpio chip that includes an irq chip.
+ pub struct RegistrationWithIrqChip<T: ChipWithIrqChip> {
+ reg: Registration<T>,
+ irq_chip: UnsafeCell<bindings::irq_chip>,
+ parent_irq: u32,
+ }
+
+ impl<T: ChipWithIrqChip> RegistrationWithIrqChip<T> {
+ /// Creates a new [`RegistrationWithIrqChip`] but does not register it yet.
+ ///
+ /// It is allowed to move.
+ pub fn new() -> Self {
+ Self {
+ reg: Registration::new(),
+ irq_chip: UnsafeCell::new(bindings::irq_chip::default()),
+ parent_irq: 0,
+ }
+ }
+
+ /// Registers a gpio chip and its irq chip with the rest of the kernel.
+ ///
+ /// Users are encouraged to use the [`gpio_irq_chip_register`] macro because it
+ /// automatically defines the lock classes and calls the registration function.
+ pub fn register<U: irq::Chip<Data = T::Data>>(
+ mut self: Pin<&mut Self>,
+ gpio_count: u16,
+ base: Option<i32>,
+ parent: &dyn device::RawDevice,
+ data: T::Data,
+ parent_irq: u32,
+ lock_keys: [&'static LockClassKey; 2],
+ ) -> Result {
+ if self.reg.parent.is_some() {
+ // Already registered.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.as_mut().get_unchecked_mut() };
+
+ // Initialise the irq_chip.
+ {
+ let irq_chip = this.irq_chip.get_mut();
+ irq_chip.name = parent.name().as_char_ptr();
+
+ // SAFETY: The gpio subsystem configures a pointer to `gpio_chip` as the irq chip
+ // data, so we use `IrqChipAdapter` to convert to the `T::Data`, which is the same
+ // as `irq::Chip::Data` per the bound above.
+ unsafe { irq::init_chip::<IrqChipAdapter<U>>(irq_chip) };
+ }
+
+ // Initialise gc irq state.
+ {
+ let girq = &mut this.reg.gc.get_mut().irq;
+ girq.chip = this.irq_chip.get();
+ // SAFETY: By leaving `parent_handler_data` set to `null`, the gpio subsystem
+ // initialises it to a pointer to the gpio chip, which is what `FlowHandler<T>`
+ // expects.
+ girq.parent_handler = unsafe { irq::new_flow_handler::<FlowHandler<T>>() };
+ girq.num_parents = 1;
+ girq.parents = &mut this.parent_irq;
+ this.parent_irq = parent_irq;
+ girq.default_type = bindings::IRQ_TYPE_NONE;
+ girq.handler = Some(bindings::handle_bad_irq);
+ }
+
+ // SAFETY: `reg` is pinned when `self` is.
+ let pinned = unsafe { self.map_unchecked_mut(|r| &mut r.reg) };
+ pinned.register(gpio_count, base, parent, data, lock_keys)
+ }
+ }
+
+ impl<T: ChipWithIrqChip> Default for RegistrationWithIrqChip<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+ }
+
+ // SAFETY: `RegistrationWithIrqChip` doesn't offer any methods or access to fields when shared
+ // between threads or CPUs, so it is safe to share it.
+ unsafe impl<T: ChipWithIrqChip> Sync for RegistrationWithIrqChip<T> {}
+
+ // SAFETY: Registration with and unregistration from the gpio subsystem (including irq chips for
+ // them) can happen from any thread. Additionally, `T::Data` (which is dropped during
+ // unregistration) is `Send`, so it is ok to move `Registration` to different threads.
+ #[allow(clippy::non_send_fields_in_send_ty)]
+ unsafe impl<T: ChipWithIrqChip> Send for RegistrationWithIrqChip<T> where T::Data: Send {}
+
+ struct FlowHandler<T: ChipWithIrqChip>(PhantomData<T>);
+
+ impl<T: ChipWithIrqChip> irq::FlowHandler for FlowHandler<T> {
+ type Data = *mut bindings::gpio_chip;
+
+ fn handle_irq_flow(gc: *mut bindings::gpio_chip, desc: &irq::Descriptor) {
+ // SAFETY: `FlowHandler` is only used in gpio chips, and it is removed when the gpio is
+ // unregistered, so we know that `gc` must still be valid. We also know that the value
+ // stored as gpio data was returned by `T::Data::into_pointer`, because
+ // `FlowHandler` is a private structure only used in this way.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
+
+ // SAFETY: `gc` is valid (see comment above), so we can dereference it.
+ let domain = unsafe { irq::Domain::from_ptr((*gc).irq.domain) };
+
+ T::handle_irq_flow(data, desc, &domain);
+ }
+ }
+
+ /// Adapter from an irq chip with `gpio_chip` pointer as context to one where the gpio chip
+ /// data is passed as context.
+ struct IrqChipAdapter<T: irq::Chip>(PhantomData<T>);
+
+ #[vtable]
+ impl<T: irq::Chip> irq::Chip for IrqChipAdapter<T> {
+ type Data = *mut bindings::gpio_chip;
+
+ const HAS_SET_TYPE: bool = T::HAS_SET_TYPE;
+ const HAS_SET_WAKE: bool = T::HAS_SET_WAKE;
+
+ fn ack(gc: *mut bindings::gpio_chip, irq_data: &irq::IrqData) {
+ // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
+ // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
+ // registered, so `gc` is valid.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
+ T::ack(data, irq_data);
+ }
+
+ fn mask(gc: *mut bindings::gpio_chip, irq_data: &irq::IrqData) {
+ // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
+ // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
+ // registered, so `gc` is valid.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
+ T::mask(data, irq_data);
+ }
+
+ fn unmask(gc: *mut bindings::gpio_chip, irq_data: &irq::IrqData) {
+ // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
+ // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
+ // registered, so `gc` is valid.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
+ T::unmask(data, irq_data);
+ }
+
+ fn set_type(
+ gc: *mut bindings::gpio_chip,
+ irq_data: &mut irq::LockedIrqData,
+ flow_type: u32,
+ ) -> Result<irq::ExtraResult> {
+ // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
+ // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
+ // registered, so `gc` is valid.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
+ T::set_type(data, irq_data, flow_type)
+ }
+
+ fn set_wake(gc: *mut bindings::gpio_chip, irq_data: &irq::IrqData, on: bool) -> Result {
+ // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
+ // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
+ // registered, so `gc` is valid.
+ let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
+ T::set_wake(data, irq_data, on)
+ }
+ }
+
+ /// Registers a gpio chip and its irq chip with the rest of the kernel.
+ ///
+ /// It automatically defines the required lock classes.
+ #[macro_export]
+ macro_rules! gpio_irq_chip_register {
+ ($reg:expr, $irqchip:ty, $count:expr, $base:expr, $parent:expr, $data:expr,
+ $parent_irq:expr $(,)?) => {{
+ static CLASS1: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ static CLASS2: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ $crate::gpio::RegistrationWithIrqChip::register::<$irqchip>(
+ $reg,
+ $count,
+ $base,
+ $parent,
+ $data,
+ $parent_irq,
+ [&CLASS1, &CLASS2],
+ )
+ }};
+ }
+}
diff --git a/rust/kernel/hwrng.rs b/rust/kernel/hwrng.rs
new file mode 100644
index 000000000000..5918f567c332
--- /dev/null
+++ b/rust/kernel/hwrng.rs
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Hardware Random Number Generator.
+//!
+//! C header: [`include/linux/hw_random.h`](../../../../include/linux/hw_random.h)
+
+use alloc::{boxed::Box, slice::from_raw_parts_mut};
+
+use crate::{
+ bindings, error::code::*, error::from_kernel_result, str::CString, to_result,
+ types::PointerWrapper, Result, ScopeGuard,
+};
+use macros::vtable;
+
+use core::{cell::UnsafeCell, fmt, marker::PhantomData, pin::Pin};
+
+/// This trait is implemented in order to provide callbacks to `struct hwrng`.
+#[vtable]
+pub trait Operations {
+ /// The pointer type that will be used to hold user-defined data type.
+ type Data: PointerWrapper + Send + Sync = ();
+
+ /// Initialization callback, can be left undefined.
+ fn init(_data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Result {
+ Err(EINVAL)
+ }
+
+ /// Cleanup callback, can be left undefined.
+ fn cleanup(_data: Self::Data) {}
+
+ /// Reads data into the provided buffer.
+ ///
+ /// Drivers can fill up to `buffer.len()` bytes of data into the buffer. The buffer is
+ /// aligned for any type, and its size is a multiple of 4 and at least 32 bytes.
+ fn read(
+ data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ buffer: &mut [u8],
+ wait: bool,
+ ) -> Result<u32>;
+}
+
+/// Registration structure for Hardware Random Number Generator driver.
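+///
+/// # Examples
+///
+/// The following is a minimal sketch; `MyRng` is an illustrative driver type (not part of this
+/// patch) whose `read` callback simply reports that no data is available.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::{hwrng, types::PointerWrapper};
+///
+/// struct MyRng;
+///
+/// #[vtable]
+/// impl hwrng::Operations for MyRng {
+///     type Data = ();
+///
+///     fn read(
+///         _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+///         _buffer: &mut [u8],
+///         _wait: bool,
+///     ) -> Result<u32> {
+///         // A real driver would fill `_buffer` with bytes read from the hardware.
+///         Ok(0)
+///     }
+/// }
+///
+/// fn register() -> Result<Pin<Box<hwrng::Registration<MyRng>>>> {
+///     hwrng::Registration::new_pinned(kernel::fmt!("my_rng"), 0, ())
+/// }
+/// ```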
+pub struct Registration<T: Operations> {
+ hwrng: UnsafeCell<bindings::hwrng>,
+ name: Option<CString>,
+ registered: bool,
+ _p: PhantomData<T>,
+}
+
+impl<T: Operations> Registration<T> {
+ /// Creates a new instance of the registration.
+ ///
+ /// The returned instance still needs to be registered (see [`Registration::register`]).
+ pub fn new() -> Self {
+ Self {
+ hwrng: UnsafeCell::new(bindings::hwrng::default()),
+ name: None,
+ registered: false,
+ _p: PhantomData,
+ }
+ }
+
+ /// Returns a registered and pinned, heap-allocated representation of the registration.
+ pub fn new_pinned(
+ name: fmt::Arguments<'_>,
+ quality: u16,
+ data: T::Data,
+ ) -> Result<Pin<Box<Self>>> {
+ let mut reg = Pin::from(Box::try_new(Self::new())?);
+ reg.as_mut().register(name, quality, data)?;
+ Ok(reg)
+ }
+
+ /// Registers a hwrng device within the rest of the kernel.
+ ///
+ /// It must be pinned because the memory block that represents
+ /// the registration may be self-referential.
+ pub fn register(
+ self: Pin<&mut Self>,
+ name: fmt::Arguments<'_>,
+ quality: u16,
+ data: T::Data,
+ ) -> Result {
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+
+ if this.registered {
+ return Err(EINVAL);
+ }
+
+ let data_pointer = data.into_pointer();
+
+ // SAFETY: `data_pointer` comes from the call to `data.into_pointer()` above.
+ let guard = ScopeGuard::new(|| unsafe {
+ T::Data::from_pointer(data_pointer);
+ });
+
+ let name = CString::try_from_fmt(name)?;
+
+ // SAFETY: The registration is pinned and contains an allocated, zero-initialised
+ // `bindings::hwrng` structure.
+ Self::init_hwrng(
+ unsafe { &mut *this.hwrng.get() },
+ &name,
+ quality,
+ data_pointer,
+ );
+
+ // SAFETY: `bindings::hwrng` is initialized above which guarantees safety.
+ to_result(unsafe { bindings::hwrng_register(this.hwrng.get()) })?;
+
+ this.registered = true;
+ this.name = Some(name);
+ guard.dismiss();
+ Ok(())
+ }
+
+ fn init_hwrng(
+ hwrng: &mut bindings::hwrng,
+ name: &CString,
+ quality: u16,
+ data: *const core::ffi::c_void,
+ ) {
+ hwrng.name = name.as_char_ptr();
+
+ hwrng.init = if T::HAS_INIT {
+ Some(Self::init_callback)
+ } else {
+ None
+ };
+ hwrng.cleanup = if T::HAS_CLEANUP {
+ Some(Self::cleanup_callback)
+ } else {
+ None
+ };
+ hwrng.data_present = None;
+ hwrng.data_read = None;
+ hwrng.read = Some(Self::read_callback);
+
+ hwrng.priv_ = data as _;
+ hwrng.quality = quality;
+
+ // SAFETY: All fields are properly initialised; the remaining fields (`list`, `ref` and
+ // `cleanup_done`) are already zeroed by the `bindings::hwrng::default()` call.
+ }
+
+ unsafe extern "C" fn init_callback(rng: *mut bindings::hwrng) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `priv` private data field was initialized during creation of
+ // the `bindings::hwrng` in `Self::init_hwrng` method. This callback is only
+ // called once the driver is registered.
+ let data = unsafe { T::Data::borrow((*rng).priv_ as *const _) };
+ T::init(data)?;
+ Ok(0)
+ }
+ }
+
+ unsafe extern "C" fn cleanup_callback(rng: *mut bindings::hwrng) {
+ // SAFETY: `priv` private data field was initialized during creation of
+ // the `bindings::hwrng` in `Self::init_hwrng` method. This callback is only
+ // called once the driver is registered.
+ let data = unsafe { T::Data::from_pointer((*rng).priv_ as *const _) };
+ T::cleanup(data);
+ }
+
+ unsafe extern "C" fn read_callback(
+ rng: *mut bindings::hwrng,
+ data: *mut core::ffi::c_void,
+ max: usize,
+ wait: bindings::bool_,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `priv` private data field was initialized during creation of
+ // the `bindings::hwrng` in `Self::init_hwrng` method. This callback is only
+ // called once the driver is registered.
+ let drv_data = unsafe { T::Data::borrow((*rng).priv_ as *const _) };
+
+ // SAFETY: Slice is created from `data` and `max` arguments that are C's buffer
+ // along with its size in bytes that are safe for this conversion.
+ let buffer = unsafe { from_raw_parts_mut(data as *mut u8, max) };
+ let ret = T::read(drv_data, buffer, wait)?;
+ Ok(ret as _)
+ }
+ }
+}
+
+impl<T: Operations> Default for Registration<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+// SAFETY: `Registration` does not expose any of its state across threads.
+unsafe impl<T: Operations> Sync for Registration<T> {}
+
+// SAFETY: `Registration` is not restricted to a single thread,
+// its `T::Data` is also `Send` so it may be moved to different threads.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: Operations> Send for Registration<T> {}
+
+impl<T: Operations> Drop for Registration<T> {
+ /// Removes the registration from the kernel if it has completed successfully before.
+ fn drop(&mut self) {
+ // SAFETY: The instance of `Registration<T>` is unregistered only
+ // after having been initialised and successfully registered.
+ if self.registered {
+ unsafe { bindings::hwrng_unregister(self.hwrng.get()) };
+ }
+ }
+}
diff --git a/rust/kernel/io_buffer.rs b/rust/kernel/io_buffer.rs
new file mode 100644
index 000000000000..ccecc4763aca
--- /dev/null
+++ b/rust/kernel/io_buffer.rs
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Buffers used in IO.
+
+use crate::Result;
+use alloc::vec::Vec;
+use core::mem::{size_of, MaybeUninit};
+
+/// Represents a buffer to be read from during IO.
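+///
+/// # Examples
+///
+/// A sketch of how a user of this trait might read two `u32` values; the helper function is
+/// illustrative and not part of this patch.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::io_buffer::IoBufferReader;
+///
+/// fn read_pair(reader: &mut impl IoBufferReader) -> Result<(u32, u32)> {
+///     // `u32` implements `ReadableFromBytes`, so it can be read directly from the buffer.
+///     let first = reader.read::<u32>()?;
+///     let second = reader.read::<u32>()?;
+///     Ok((first, second))
+/// }
+/// ```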
+pub trait IoBufferReader {
+ /// Returns the number of bytes left to be read from the io buffer.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ fn len(&self) -> usize;
+
+ /// Returns `true` if no data is available in the io buffer.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Reads raw data from the io buffer into a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The output buffer must be valid.
+ unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result;
+
+ /// Reads all data remaining in the io buffer.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to mapped, readable memory.
+ fn read_all(&mut self) -> Result<Vec<u8>> {
+ let mut data = Vec::<u8>::new();
+ data.try_resize(self.len(), 0)?;
+
+ // SAFETY: The output buffer is valid as we just allocated it.
+ unsafe { self.read_raw(data.as_mut_ptr(), data.len())? };
+ Ok(data)
+ }
+
+ /// Reads a byte slice from the io buffer.
+ ///
+ /// Returns `EFAULT` if the byte slice is bigger than the remaining size of the user slice or
+ /// if the address does not currently point to mapped, readable memory.
+ fn read_slice(&mut self, data: &mut [u8]) -> Result {
+ // SAFETY: The output buffer is valid as it's coming from a live reference.
+ unsafe { self.read_raw(data.as_mut_ptr(), data.len()) }
+ }
+
+ /// Reads the contents of a plain old data (POD) type from the io buffer.
+ fn read<T: ReadableFromBytes>(&mut self) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ // SAFETY: The buffer is valid as it was just allocated.
+ unsafe { self.read_raw(out.as_mut_ptr() as _, size_of::<T>()) }?;
+ // SAFETY: We just initialised the data.
+ Ok(unsafe { out.assume_init() })
+ }
+}
+
+/// Represents a buffer to be written to during IO.
+pub trait IoBufferWriter {
+ /// Returns the number of bytes left to be written into the io buffer.
+ ///
+ /// Note that even writing less than this number of bytes may fail.
+ fn len(&self) -> usize;
+
+ /// Returns `true` if the io buffer cannot hold any additional data.
+ fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Writes zeroes to the io buffer.
+ ///
+ /// Unlike the other write functions, `clear` will zero as much as it can and update the
+ /// writer's internal state to reflect this. It will, however, return an error if it cannot
+ /// clear `len` bytes.
+ ///
+ /// For example, if a caller requests that 100 bytes be cleared but a segfault happens after
+ /// 20 bytes, then EFAULT is returned and the writer is advanced by 20 bytes.
+ fn clear(&mut self, len: usize) -> Result;
+
+ /// Writes a byte slice into the io buffer.
+ ///
+ /// Returns `EFAULT` if the byte slice is bigger than the remaining size of the io buffer or if
+ /// the address does not currently point to mapped, writable memory.
+ fn write_slice(&mut self, data: &[u8]) -> Result {
+ // SAFETY: The input buffer is valid as it's coming from a live reference.
+ unsafe { self.write_raw(data.as_ptr(), data.len()) }
+ }
+
+ /// Writes raw data to the io buffer from a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The input buffer must be valid.
+ unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result;
+
+ /// Writes the contents of the given data into the io buffer.
+ fn write<T: WritableToBytes>(&mut self, data: &T) -> Result {
+ // SAFETY: The input buffer is valid as it's coming from a live
+ // reference to a type that implements `WritableToBytes`.
+ unsafe { self.write_raw(data as *const T as _, size_of::<T>()) }
+ }
+}
+
+/// Specifies that a type is safely readable from byte slices.
+///
+/// Not all types can be safely read from byte slices; examples from
+/// <https://doc.rust-lang.org/reference/behavior-considered-undefined.html> include `bool`
+/// that must be either `0` or `1`, and `char` that cannot be a surrogate or above `char::MAX`.
+///
+/// # Safety
+///
+/// Implementers must ensure that the type is made up only of types that can be safely read from
+/// arbitrary byte sequences (e.g., `u32`, `u64`, etc.).
+pub unsafe trait ReadableFromBytes {}
+
+// SAFETY: All bit patterns are acceptable values of the types below.
+unsafe impl ReadableFromBytes for u8 {}
+unsafe impl ReadableFromBytes for u16 {}
+unsafe impl ReadableFromBytes for u32 {}
+unsafe impl ReadableFromBytes for u64 {}
+unsafe impl ReadableFromBytes for usize {}
+unsafe impl ReadableFromBytes for i8 {}
+unsafe impl ReadableFromBytes for i16 {}
+unsafe impl ReadableFromBytes for i32 {}
+unsafe impl ReadableFromBytes for i64 {}
+unsafe impl ReadableFromBytes for isize {}
+
+/// Specifies that a type is safely writable to byte slices.
+///
+/// This means that we don't read undefined values (which leads to UB) in preparation for writing
+/// to the byte slice. It also ensures that no potentially sensitive information is leaked into the
+/// byte slices.
+///
+/// # Safety
+///
+/// A type must not include padding bytes and must be fully initialised to safely implement
+/// [`WritableToBytes`] (i.e., it doesn't contain [`MaybeUninit`] fields). A composition of
+/// writable types in a structure is not necessarily writable because it may result in padding
+/// bytes.
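+///
+/// # Examples
+///
+/// A sketch of how a padding-free, fully initialised type might opt in; `Header` is an
+/// illustrative type, not part of this patch.
+///
+/// ```
+/// use kernel::io_buffer::WritableToBytes;
+///
+/// #[repr(C)]
+/// struct Header {
+///     tag: u32,
+///     length: u32,
+/// }
+///
+/// // SAFETY: `Header` is `#[repr(C)]` and contains only `u32` fields, so it has no padding
+/// // bytes and no uninitialised portions once constructed.
+/// unsafe impl WritableToBytes for Header {}
+/// ```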
+pub unsafe trait WritableToBytes {}
+
+// SAFETY: Initialised instances of the following types have no uninitialised portions.
+unsafe impl WritableToBytes for u8 {}
+unsafe impl WritableToBytes for u16 {}
+unsafe impl WritableToBytes for u32 {}
+unsafe impl WritableToBytes for u64 {}
+unsafe impl WritableToBytes for usize {}
+unsafe impl WritableToBytes for i8 {}
+unsafe impl WritableToBytes for i16 {}
+unsafe impl WritableToBytes for i32 {}
+unsafe impl WritableToBytes for i64 {}
+unsafe impl WritableToBytes for isize {}
diff --git a/rust/kernel/io_mem.rs b/rust/kernel/io_mem.rs
new file mode 100644
index 000000000000..ff6886a9e3b7
--- /dev/null
+++ b/rust/kernel/io_mem.rs
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory-mapped IO.
+//!
+//! C header: [`include/asm-generic/io.h`](../../../../include/asm-generic/io.h)
+
+#![allow(dead_code)]
+
+use crate::{bindings, error::code::*, Result};
+use core::convert::TryInto;
+
+/// Represents a memory resource.
+pub struct Resource {
+ offset: bindings::resource_size_t,
+ size: bindings::resource_size_t,
+}
+
+impl Resource {
+ pub(crate) fn new(
+ start: bindings::resource_size_t,
+ end: bindings::resource_size_t,
+ ) -> Option<Self> {
+ if start == 0 {
+ return None;
+ }
+ Some(Self {
+ offset: start,
+ size: end.checked_sub(start)?.checked_add(1)?,
+ })
+ }
+}
+
+/// Represents a memory block of at least `SIZE` bytes.
+///
+/// # Invariants
+///
+/// `ptr` is a non-null and valid address of at least `SIZE` bytes and returned by an `ioremap`
+/// variant. `ptr` is also 8-byte aligned.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::io_mem::{IoMem, Resource};
+///
+/// fn test(res: Resource) -> Result {
+/// // Create an io mem block of at least 100 bytes.
+/// // SAFETY: No DMA operations are initiated through `mem`.
+/// let mem = unsafe { IoMem::<100>::try_new(res) }?;
+///
+/// // Read one byte from offset 10.
+/// let v = mem.readb(10);
+///
+/// // Write value to offset 20.
+/// mem.writeb(v, 20);
+///
+/// Ok(())
+/// }
+/// ```
+pub struct IoMem<const SIZE: usize> {
+ ptr: usize,
+}
+
+macro_rules! define_read {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+ /// Reads IO data from the given offset, known at compile time.
+ ///
+ /// If the offset is not known at compile time, the build will fail.
+ $(#[$attr])*
+ #[inline]
+ pub fn $name(&self, offset: usize) -> $type_name {
+ Self::check_offset::<$type_name>(offset);
+ let ptr = self.ptr.wrapping_add(offset);
+ // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
+ // guarantees that the code won't build if `offset` makes the read go out of bounds
+ // (including the type size).
+ unsafe { bindings::$name(ptr as _) }
+ }
+
+ /// Reads IO data from the given offset.
+ ///
+ /// It fails if/when the offset (plus the type size) is out of bounds.
+ $(#[$attr])*
+ pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
+ if !Self::offset_ok::<$type_name>(offset) {
+ return Err(EINVAL);
+ }
+ let ptr = self.ptr.wrapping_add(offset);
+ // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
+ // returns an error if `offset` would make the read go out of bounds (including the
+ // type size).
+ Ok(unsafe { bindings::$name(ptr as _) })
+ }
+ };
+}
+
+macro_rules! define_write {
+ ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
+ /// Writes IO data to the given offset, known at compile time.
+ ///
+ /// If the offset is not known at compile time, the build will fail.
+ $(#[$attr])*
+ #[inline]
+ pub fn $name(&self, value: $type_name, offset: usize) {
+ Self::check_offset::<$type_name>(offset);
+ let ptr = self.ptr.wrapping_add(offset);
+ // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
+ // guarantees that the code won't link if `offset` makes the write go out of bounds
+ // (including the type size).
+ unsafe { bindings::$name(value, ptr as _) }
+ }
+
+ /// Writes IO data to the given offset.
+ ///
+ /// It fails if/when the offset (plus the type size) is out of bounds.
+ $(#[$attr])*
+ pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
+ if !Self::offset_ok::<$type_name>(offset) {
+ return Err(EINVAL);
+ }
+ let ptr = self.ptr.wrapping_add(offset);
+ // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
+ // returns an error if `offset` would make the write go out of bounds (including the
+ // type size).
+ unsafe { bindings::$name(value, ptr as _) };
+ Ok(())
+ }
+ };
+}
+
+impl<const SIZE: usize> IoMem<SIZE> {
+ /// Tries to create a new instance of a memory block.
+ ///
+ /// The resource described by `res` is mapped into the CPU's address space so that it can be
+ /// accessed directly. It is also consumed by this function so that it can't be mapped again
+ /// to a different address.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that either (a) the resulting interface cannot be used to initiate DMA
+ /// operations, or (b) that DMA operations initiated via the returned interface use DMA handles
+ /// allocated through the `dma` module.
+ pub unsafe fn try_new(res: Resource) -> Result<Self> {
+ // Check that the resource has at least `SIZE` bytes in it.
+ if res.size < SIZE.try_into()? {
+ return Err(EINVAL);
+ }
+
+ // To be able to check pointers at compile time based only on offsets, we need to guarantee
+ // that the base pointer is minimally aligned, so we conservatively require at least 8-byte
+ // alignment.
+ if res.offset % 8 != 0 {
+ crate::pr_err!("Physical address is not 64-bit aligned: {:x}", res.offset);
+ return Err(EDOM);
+ }
+
+ // Try to map the resource.
+ // SAFETY: Just mapping the memory range.
+ let addr = unsafe { bindings::ioremap(res.offset, res.size as _) };
+ if addr.is_null() {
+ Err(ENOMEM)
+ } else {
+ // INVARIANT: `addr` is non-null and was returned by `ioremap`, so it is valid. It is
+ // also 8-byte aligned because we checked it above.
+ Ok(Self { ptr: addr as usize })
+ }
+ }
+
+ #[inline]
+ const fn offset_ok<T>(offset: usize) -> bool {
+ let type_size = core::mem::size_of::<T>();
+ if let Some(end) = offset.checked_add(type_size) {
+ end <= SIZE && offset % type_size == 0
+ } else {
+ false
+ }
+ }
+
+ fn offset_ok_of_val<T: ?Sized>(offset: usize, value: &T) -> bool {
+ let value_size = core::mem::size_of_val(value);
+ let value_alignment = core::mem::align_of_val(value);
+ if let Some(end) = offset.checked_add(value_size) {
+ end <= SIZE && offset % value_alignment == 0
+ } else {
+ false
+ }
+ }
+
+ #[inline]
+ const fn check_offset<T>(offset: usize) {
+ crate::build_assert!(Self::offset_ok::<T>(offset), "IoMem offset overflow");
+ }
+
+ /// Copies a memory block from I/O memory by filling the specified buffer with it.
+ ///
+ /// # Examples
+ /// ```
+ /// # use kernel::prelude::*;
+ /// use kernel::io_mem::{IoMem, Resource};
+ ///
+ /// fn test(res: Resource) -> Result {
+ /// // Create an i/o memory block of at least 100 bytes.
+ /// // SAFETY: No DMA operations are initiated through `mem`.
+ /// let mem = unsafe { IoMem::<100>::try_new(res) }?;
+ ///
+ /// let mut buffer: [u8; 32] = [0; 32];
+ ///
+ /// // Memcpy 16 bytes from an offset 10 of i/o memory block into the buffer.
+ /// mem.try_memcpy_fromio(&mut buffer[..16], 10)?;
+ ///
+ /// Ok(())
+ /// }
+ /// ```
+ pub fn try_memcpy_fromio(&self, buffer: &mut [u8], offset: usize) -> Result {
+ if !Self::offset_ok_of_val(offset, buffer) {
+ return Err(EINVAL);
+ }
+
+ let ptr = self.ptr.wrapping_add(offset);
+
+ // SAFETY:
+ // - The type invariants guarantee that `ptr` is a valid pointer.
+ // - The bounds of `buffer` are checked with a call to `offset_ok_of_val()`.
+ unsafe {
+ bindings::memcpy_fromio(
+ buffer.as_mut_ptr() as *mut _,
+ ptr as *const _,
+ buffer.len() as _,
+ )
+ };
+ Ok(())
+ }
+
+ define_read!(readb, try_readb, u8);
+ define_read!(readw, try_readw, u16);
+ define_read!(readl, try_readl, u32);
+ define_read!(
+ #[cfg(CONFIG_64BIT)]
+ readq,
+ try_readq,
+ u64
+ );
+
+ define_read!(readb_relaxed, try_readb_relaxed, u8);
+ define_read!(readw_relaxed, try_readw_relaxed, u16);
+ define_read!(readl_relaxed, try_readl_relaxed, u32);
+ define_read!(
+ #[cfg(CONFIG_64BIT)]
+ readq_relaxed,
+ try_readq_relaxed,
+ u64
+ );
+
+ define_write!(writeb, try_writeb, u8);
+ define_write!(writew, try_writew, u16);
+ define_write!(writel, try_writel, u32);
+ define_write!(
+ #[cfg(CONFIG_64BIT)]
+ writeq,
+ try_writeq,
+ u64
+ );
+
+ define_write!(writeb_relaxed, try_writeb_relaxed, u8);
+ define_write!(writew_relaxed, try_writew_relaxed, u16);
+ define_write!(writel_relaxed, try_writel_relaxed, u32);
+ define_write!(
+ #[cfg(CONFIG_64BIT)]
+ writeq_relaxed,
+ try_writeq_relaxed,
+ u64
+ );
+}
+
+impl<const SIZE: usize> Drop for IoMem<SIZE> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariant, `self.ptr` is a value returned by a previous successful
+ // call to `ioremap`.
+ unsafe { bindings::iounmap(self.ptr as _) };
+ }
+}
diff --git a/rust/kernel/iov_iter.rs b/rust/kernel/iov_iter.rs
new file mode 100644
index 000000000000..b9b8dc882bd0
--- /dev/null
+++ b/rust/kernel/iov_iter.rs
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! IO vector iterators.
+//!
+//! C header: [`include/linux/uio.h`](../../../../include/linux/uio.h)
+
+use crate::{
+ bindings,
+ error::code::*,
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ Result,
+};
+
+/// Wraps the kernel's `struct iov_iter`.
+///
+/// # Invariants
+///
+/// The pointer `IovIter::ptr` is non-null and valid.
+pub struct IovIter {
+ ptr: *mut bindings::iov_iter,
+}
+
+impl IovIter {
+ fn common_len(&self) -> usize {
+ // SAFETY: `IovIter::ptr` is guaranteed to be valid by the type invariants.
+ unsafe { (*self.ptr).count }
+ }
+
+ /// Constructs a new [`struct iov_iter`] wrapper.
+ ///
+ /// # Safety
+ ///
+ /// The pointer `ptr` must be non-null and valid for the lifetime of the object.
+ pub(crate) unsafe fn from_ptr(ptr: *mut bindings::iov_iter) -> Self {
+ // INVARIANTS: the safety contract ensures the type invariant will hold.
+ Self { ptr }
+ }
+}
+
+impl IoBufferWriter for IovIter {
+ fn len(&self) -> usize {
+ self.common_len()
+ }
+
+ fn clear(&mut self, mut len: usize) -> Result {
+ while len > 0 {
+ // SAFETY: `IovIter::ptr` is guaranteed to be valid by the type invariants.
+ let written = unsafe { bindings::iov_iter_zero(len, self.ptr) };
+ if written == 0 {
+ return Err(EFAULT);
+ }
+
+ len -= written;
+ }
+ Ok(())
+ }
+
+ unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result {
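+ // SAFETY: `IovIter::ptr` is valid by the type invariants, and the caller guarantees (per
+ // this method's safety requirements) that `data` is valid for reading `len` bytes.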
+ let res = unsafe { bindings::copy_to_iter(data as _, len, self.ptr) };
+ if res != len {
+ Err(EFAULT)
+ } else {
+ Ok(())
+ }
+ }
+}
+
+impl IoBufferReader for IovIter {
+ fn len(&self) -> usize {
+ self.common_len()
+ }
+
+ unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result {
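+ // SAFETY: `IovIter::ptr` is valid by the type invariants, and the caller guarantees (per
+ // this method's safety requirements) that `out` is valid for writing `len` bytes.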
+ let res = unsafe { bindings::copy_from_iter(out as _, len, self.ptr) };
+ if res != len {
+ Err(EFAULT)
+ } else {
+ Ok(())
+ }
+ }
+}
diff --git a/rust/kernel/irq.rs b/rust/kernel/irq.rs
new file mode 100644
index 000000000000..f2fa270dd728
--- /dev/null
+++ b/rust/kernel/irq.rs
@@ -0,0 +1,681 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Interrupts and interrupt chips.
+//!
+//! See <https://www.kernel.org/doc/Documentation/core-api/genericirq.rst>.
+//!
+//! C headers: [`include/linux/irq.h`](../../../../include/linux/irq.h) and
+//! [`include/linux/interrupt.h`](../../../../include/linux/interrupt.h).
+
+#![allow(dead_code)]
+
+use crate::{
+ bindings,
+ error::{from_kernel_result, to_result},
+ str::CString,
+ types::PointerWrapper,
+ Error, Result, ScopeGuard,
+};
+use core::{fmt, marker::PhantomData, ops::Deref};
+use macros::vtable;
+
+/// The type of irq hardware numbers.
+pub type HwNumber = bindings::irq_hw_number_t;
+
+/// Wraps the kernel's `struct irq_data`.
+///
+/// # Invariants
+///
+/// The pointer `IrqData::ptr` is non-null and valid.
+pub struct IrqData {
+ ptr: *mut bindings::irq_data,
+}
+
+impl IrqData {
+ /// Creates a new `IrqData` instance from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `ptr` is non-null and valid when the function is called, and that
+ /// it remains valid for the lifetime of the returned [`IrqData`] instance.
+ unsafe fn from_ptr(ptr: *mut bindings::irq_data) -> Self {
+ // INVARIANTS: By the safety requirements, the instance we're creating satisfies the type
+ // invariants.
+ Self { ptr }
+ }
+
+ /// Returns the hardware irq number.
+ pub fn hwirq(&self) -> HwNumber {
+ // SAFETY: By the type invariants, it's ok to dereference `ptr`.
+ unsafe { (*self.ptr).hwirq }
+ }
+}
+
+/// Wraps the kernel's `struct irq_data` when it is locked.
+///
+/// Being locked allows additional operations to be performed on the data.
+pub struct LockedIrqData(IrqData);
+
+impl LockedIrqData {
+ /// Sets the high-level irq flow handler to the builtin one for level-triggered irqs.
+ pub fn set_level_handler(&mut self) {
+ // SAFETY: By the type invariants of `self.0`, we know `self.0.ptr` is valid.
+ unsafe { bindings::irq_set_handler_locked(self.0.ptr, Some(bindings::handle_level_irq)) };
+ }
+
+ /// Sets the high-level irq flow handler to the builtin one for edge-triggered irqs.
+ pub fn set_edge_handler(&mut self) {
+ // SAFETY: By the type invariants of `self.0`, we know `self.0.ptr` is valid.
+ unsafe { bindings::irq_set_handler_locked(self.0.ptr, Some(bindings::handle_edge_irq)) };
+ }
+
+ /// Sets the high-level irq flow handler to the builtin one for bad irqs.
+ pub fn set_bad_handler(&mut self) {
+ // SAFETY: By the type invariants of `self.0`, we know `self.0.ptr` is valid.
+ unsafe { bindings::irq_set_handler_locked(self.0.ptr, Some(bindings::handle_bad_irq)) };
+ }
+}
+
+impl Deref for LockedIrqData {
+ type Target = IrqData;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+/// Extra information returned by some of the [`Chip`] methods on success.
+pub enum ExtraResult {
+ /// Indicates that the caller (irq core) will update the descriptor state.
+ None = bindings::IRQ_SET_MASK_OK as _,
+
+ /// Indicates that the callee (irq chip implementation) already updated the descriptor state.
+ NoCopy = bindings::IRQ_SET_MASK_OK_NOCOPY as _,
+
+ /// Same as [`ExtraResult::None`] in terms of updating descriptor state. It is used in stacked
+ /// irq chips to indicate that descendant chips should be skipped.
+ Done = bindings::IRQ_SET_MASK_OK_DONE as _,
+}
+
+/// An irq chip.
+///
+/// It is a trait for the functions defined in [`struct irq_chip`].
+///
+/// [`struct irq_chip`]: ../../../include/linux/irq.h
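+///
+/// # Examples
+///
+/// A minimal sketch of an irq chip whose callbacks do nothing; `DummyChip` is illustrative and
+/// not part of this patch.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::{irq, types::PointerWrapper};
+///
+/// struct DummyChip;
+///
+/// #[vtable]
+/// impl irq::Chip for DummyChip {
+///     type Data = ();
+///
+///     fn ack(_data: <Self::Data as PointerWrapper>::Borrowed<'_>, _irq_data: &irq::IrqData) {}
+///     fn mask(_data: <Self::Data as PointerWrapper>::Borrowed<'_>, _irq_data: &irq::IrqData) {}
+///     fn unmask(_data: <Self::Data as PointerWrapper>::Borrowed<'_>, _irq_data: &irq::IrqData) {}
+/// }
+/// ```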
+#[vtable]
+pub trait Chip: Sized {
+ /// The type of the context data stored in the irq chip and made available on each callback.
+ type Data: PointerWrapper;
+
+ /// Called at the start of a new interrupt.
+ fn ack(data: <Self::Data as PointerWrapper>::Borrowed<'_>, irq_data: &IrqData);
+
+ /// Masks an interrupt source.
+ fn mask(data: <Self::Data as PointerWrapper>::Borrowed<'_>, irq_data: &IrqData);
+
+ /// Unmasks an interrupt source.
+ fn unmask(_data: <Self::Data as PointerWrapper>::Borrowed<'_>, irq_data: &IrqData);
+
+ /// Sets the flow type of an interrupt.
+ ///
+ /// The flow type is a combination of the constants in [`Type`].
+ fn set_type(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _irq_data: &mut LockedIrqData,
+ _flow_type: u32,
+ ) -> Result<ExtraResult> {
+ Ok(ExtraResult::None)
+ }
+
+ /// Enables or disables power-management wake-on of an interrupt.
+ fn set_wake(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _irq_data: &IrqData,
+ _on: bool,
+ ) -> Result {
+ Ok(())
+ }
+}
+
+/// Initialises `chip` with the callbacks defined in `T`.
+///
+/// # Safety
+///
+/// The caller must ensure that the value stored in the irq chip data is the result of calling
+/// [`PointerWrapper::into_pointer`] for the [`T::Data`] type.
+pub(crate) unsafe fn init_chip<T: Chip>(chip: &mut bindings::irq_chip) {
+ chip.irq_ack = Some(irq_ack_callback::<T>);
+ chip.irq_mask = Some(irq_mask_callback::<T>);
+ chip.irq_unmask = Some(irq_unmask_callback::<T>);
+
+ if T::HAS_SET_TYPE {
+ chip.irq_set_type = Some(irq_set_type_callback::<T>);
+ }
+
+ if T::HAS_SET_WAKE {
+ chip.irq_set_wake = Some(irq_set_wake_callback::<T>);
+ }
+}
+
+/// Enables or disables power-management wake-on for the given irq number.
+pub fn set_wake(irq: u32, on: bool) -> Result {
+ // SAFETY: Just an FFI call, there are no extra requirements for safety.
+ let ret = unsafe { bindings::irq_set_irq_wake(irq, on as _) };
+ if ret < 0 {
+ Err(Error::from_kernel_errno(ret))
+ } else {
+ Ok(())
+ }
+}
+
+unsafe extern "C" fn irq_ack_callback<T: Chip>(irq_data: *mut bindings::irq_data) {
+ // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
+ // callback, ensure that the value stored as irq chip data comes from a previous call to
+ // `PointerWrapper::into_pointer`.
+ let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
+
+ // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
+ // `irq_data` is guaranteed to be valid until then (by the contract with C code).
+ T::ack(data, unsafe { &IrqData::from_ptr(irq_data) })
+}
+
+unsafe extern "C" fn irq_mask_callback<T: Chip>(irq_data: *mut bindings::irq_data) {
+ // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
+ // callback, ensure that the value stored as irq chip data comes from a previous call to
+ // `PointerWrapper::into_pointer`.
+ let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
+
+ // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
+ // `irq_data` is guaranteed to be valid until then (by the contract with C code).
+ T::mask(data, unsafe { &IrqData::from_ptr(irq_data) })
+}
+
+unsafe extern "C" fn irq_unmask_callback<T: Chip>(irq_data: *mut bindings::irq_data) {
+ // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
+ // callback, ensure that the value stored as irq chip data comes from a previous call to
+ // `PointerWrapper::into_pointer`.
+ let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
+
+ // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
+ // `irq_data` is guaranteed to be valid until then (by the contract with C code).
+ T::unmask(data, unsafe { &IrqData::from_ptr(irq_data) })
+}
+
+unsafe extern "C" fn irq_set_type_callback<T: Chip>(
+ irq_data: *mut bindings::irq_data,
+ flow_type: core::ffi::c_uint,
+) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
+ // callback, ensure that the value stored as irq chip data comes from a previous call to
+ // `PointerWrapper::into_pointer`.
+ let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
+
+ // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
+ // `irq_data` is guaranteed to be valid until then (by the contract with C code).
+ let ret = T::set_type(
+ data,
+ &mut LockedIrqData(unsafe { IrqData::from_ptr(irq_data) }),
+ flow_type,
+ )?;
+ Ok(ret as _)
+ }
+}
+
+unsafe extern "C" fn irq_set_wake_callback<T: Chip>(
+ irq_data: *mut bindings::irq_data,
+ on: core::ffi::c_uint,
+) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
+ // callback, ensure that the value stored as irq chip data comes from a previous call to
+ // `PointerWrapper::into_pointer`.
+ let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
+
+ // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
+ // `irq_data` is guaranteed to be valid until then (by the contract with C code).
+ T::set_wake(data, unsafe { &IrqData::from_ptr(irq_data) }, on != 0)?;
+ Ok(0)
+ }
+}
+
+/// Contains constants that describes how an interrupt can be triggered.
+///
+/// It is tagged with `non_exhaustive` to prevent users from instantiating it.
+#[non_exhaustive]
+pub struct Type;
+
+impl Type {
+ /// The interrupt cannot be triggered.
+ pub const NONE: u32 = bindings::IRQ_TYPE_NONE;
+
+ /// The interrupt is triggered when the signal goes from low to high.
+ pub const EDGE_RISING: u32 = bindings::IRQ_TYPE_EDGE_RISING;
+
+ /// The interrupt is triggered when the signal goes from high to low.
+ pub const EDGE_FALLING: u32 = bindings::IRQ_TYPE_EDGE_FALLING;
+
+    /// The interrupt is triggered when the signal goes from low to high and when it goes from
+    /// high to low.
+ pub const EDGE_BOTH: u32 = bindings::IRQ_TYPE_EDGE_BOTH;
+
+ /// The interrupt is triggered while the signal is held high.
+ pub const LEVEL_HIGH: u32 = bindings::IRQ_TYPE_LEVEL_HIGH;
+
+ /// The interrupt is triggered while the signal is held low.
+ pub const LEVEL_LOW: u32 = bindings::IRQ_TYPE_LEVEL_LOW;
+}
+
+/// Wraps the kernel's `struct irq_desc`.
+///
+/// # Invariants
+///
+/// The pointer `Descriptor::ptr` is non-null and valid.
+pub struct Descriptor {
+ pub(crate) ptr: *mut bindings::irq_desc,
+}
+
+impl Descriptor {
+ /// Constructs a new `struct irq_desc` wrapper.
+ ///
+ /// # Safety
+ ///
+ /// The pointer `ptr` must be non-null and valid for the lifetime of the returned object.
+ unsafe fn from_ptr(ptr: *mut bindings::irq_desc) -> Self {
+ // INVARIANT: The safety requirements ensure the invariant.
+ Self { ptr }
+ }
+
+ /// Calls `chained_irq_enter` and returns a guard that calls `chained_irq_exit` once dropped.
+ ///
+ /// It is meant to be used by chained irq handlers to dispatch irqs to the next handlers.
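+    ///
+    /// # Examples
+    ///
+    /// A sketch of dispatching a hardware irq to a child domain from a chained handler. It
+    /// assumes `CONFIG_IRQ_DOMAIN` is enabled; the domain and hw irq number are illustrative:
+    ///
+    /// ```
+    /// # use kernel::irq::{Descriptor, Domain};
+    /// fn dispatch(desc: &Descriptor, domain: &Domain, hwirq: u32) {
+    ///     let guard = desc.enter_chained();
+    ///     domain.generic_handle_chained(hwirq, &guard);
+    ///     // `chained_irq_exit` runs when `guard` is dropped at the end of this scope.
+    /// }
+    /// ```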
+ pub fn enter_chained(&self) -> ChainedGuard<'_> {
+ // SAFETY: By the type invariants, `ptr` is always non-null and valid.
+ let irq_chip = unsafe { bindings::irq_desc_get_chip(self.ptr) };
+
+ // SAFETY: By the type invariants, `ptr` is always non-null and valid. `irq_chip` was just
+ // returned from `ptr`, so it is still valid too.
+ unsafe { bindings::chained_irq_enter(irq_chip, self.ptr) };
+ ChainedGuard {
+ desc: self,
+ irq_chip,
+ }
+ }
+}
+
+struct InternalRegistration<T: PointerWrapper> {
+ irq: u32,
+ data: *mut core::ffi::c_void,
+ name: CString,
+ _p: PhantomData<T>,
+}
+
+impl<T: PointerWrapper> InternalRegistration<T> {
+ /// Registers a new irq handler.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `handler` and `thread_fn` are compatible with the registration,
+ /// that is, that they only use their second argument while the call is happening and that they
+ /// only call [`T::borrow`] on it (e.g., they shouldn't call [`T::from_pointer`] and consume
+ /// it).
+ unsafe fn try_new(
+ irq: core::ffi::c_uint,
+ handler: bindings::irq_handler_t,
+ thread_fn: bindings::irq_handler_t,
+ flags: usize,
+ data: T,
+ name: fmt::Arguments<'_>,
+ ) -> Result<Self> {
+ let ptr = data.into_pointer() as *mut _;
+ let name = CString::try_from_fmt(name)?;
+ let guard = ScopeGuard::new(|| {
+ // SAFETY: `ptr` came from a previous call to `into_pointer`.
+ unsafe { T::from_pointer(ptr) };
+ });
+ // SAFETY: `name` and `ptr` remain valid as long as the registration is alive.
+ to_result(unsafe {
+ bindings::request_threaded_irq(
+ irq,
+ handler,
+ thread_fn,
+ flags as _,
+ name.as_char_ptr(),
+ ptr,
+ )
+ })?;
+ guard.dismiss();
+ Ok(Self {
+ irq,
+ name,
+ data: ptr,
+ _p: PhantomData,
+ })
+ }
+}
+
+impl<T: PointerWrapper> Drop for InternalRegistration<T> {
+ fn drop(&mut self) {
+ // Unregister irq handler.
+ //
+ // SAFETY: When `try_new` succeeds, the irq was successfully requested, so it is ok to free
+ // it here.
+ unsafe { bindings::free_irq(self.irq, self.data) };
+
+ // Free context data.
+ //
+ // SAFETY: This matches the call to `into_pointer` from `try_new` in the success case.
+ unsafe { T::from_pointer(self.data) };
+ }
+}
+
+/// An irq handler.
+pub trait Handler {
+ /// The context data associated with and made available to the handler.
+ type Data: PointerWrapper;
+
+ /// Called from interrupt context when the irq happens.
+ fn handle_irq(data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Return;
+}
+
+/// The registration of an interrupt handler.
+///
+/// # Examples
+///
+/// The following is an example of a regular handler with a boxed `u32` as data.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::irq;
+///
+/// struct Example;
+///
+/// impl irq::Handler for Example {
+/// type Data = Box<u32>;
+///
+/// fn handle_irq(_data: &u32) -> irq::Return {
+/// irq::Return::None
+/// }
+/// }
+///
+/// fn request_irq(irq: u32, data: Box<u32>) -> Result<irq::Registration<Example>> {
+/// irq::Registration::try_new(irq, data, irq::flags::SHARED, fmt!("example_{irq}"))
+/// }
+/// ```
+pub struct Registration<H: Handler>(InternalRegistration<H::Data>);
+
+impl<H: Handler> Registration<H> {
+ /// Registers a new irq handler.
+ ///
+ /// The valid values of `flags` come from the [`flags`] module.
+ pub fn try_new(
+ irq: u32,
+ data: H::Data,
+ flags: usize,
+ name: fmt::Arguments<'_>,
+ ) -> Result<Self> {
+ // SAFETY: `handler` only calls `H::Data::borrow` on `raw_data`.
+ Ok(Self(unsafe {
+ InternalRegistration::try_new(irq, Some(Self::handler), None, flags, data, name)?
+ }))
+ }
+
+ unsafe extern "C" fn handler(
+ _irq: core::ffi::c_int,
+ raw_data: *mut core::ffi::c_void,
+ ) -> bindings::irqreturn_t {
+ // SAFETY: On registration, `into_pointer` was called, so it is safe to borrow from it here
+ // because `from_pointer` is called only after the irq is unregistered.
+ let data = unsafe { H::Data::borrow(raw_data) };
+ H::handle_irq(data) as _
+ }
+}
+
+/// A threaded irq handler.
+pub trait ThreadedHandler {
+ /// The context data associated with and made available to the handlers.
+ type Data: PointerWrapper;
+
+ /// Called from interrupt context when the irq first happens.
+ fn handle_primary_irq(_data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Return {
+ Return::WakeThread
+ }
+
+ /// Called from the handler thread.
+ fn handle_threaded_irq(data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Return;
+}
+
+/// The registration of a threaded interrupt handler.
+///
+/// # Examples
+///
+/// The following is an example of a threaded handler with a ref-counted u32 as data:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::{
+/// irq,
+/// sync::{Ref, RefBorrow},
+/// };
+///
+/// struct Example;
+///
+/// impl irq::ThreadedHandler for Example {
+/// type Data = Ref<u32>;
+///
+/// fn handle_threaded_irq(_data: RefBorrow<'_, u32>) -> irq::Return {
+/// irq::Return::None
+/// }
+/// }
+///
+/// fn request_irq(irq: u32, data: Ref<u32>) -> Result<irq::ThreadedRegistration<Example>> {
+/// irq::ThreadedRegistration::try_new(irq, data, irq::flags::SHARED, fmt!("example_{irq}"))
+/// }
+/// ```
+pub struct ThreadedRegistration<H: ThreadedHandler>(InternalRegistration<H::Data>);
+
+impl<H: ThreadedHandler> ThreadedRegistration<H> {
+ /// Registers a new threaded irq handler.
+ ///
+ /// The valid values of `flags` come from the [`flags`] module.
+ pub fn try_new(
+ irq: u32,
+ data: H::Data,
+ flags: usize,
+ name: fmt::Arguments<'_>,
+ ) -> Result<Self> {
+ // SAFETY: both `primary_handler` and `threaded_handler` only call `H::Data::borrow` on
+ // `raw_data`.
+ Ok(Self(unsafe {
+ InternalRegistration::try_new(
+ irq,
+ Some(Self::primary_handler),
+ Some(Self::threaded_handler),
+ flags,
+ data,
+ name,
+ )?
+ }))
+ }
+
+ unsafe extern "C" fn primary_handler(
+ _irq: core::ffi::c_int,
+ raw_data: *mut core::ffi::c_void,
+ ) -> bindings::irqreturn_t {
+ // SAFETY: On registration, `into_pointer` was called, so it is safe to borrow from it here
+ // because `from_pointer` is called only after the irq is unregistered.
+ let data = unsafe { H::Data::borrow(raw_data) };
+ H::handle_primary_irq(data) as _
+ }
+
+ unsafe extern "C" fn threaded_handler(
+ _irq: core::ffi::c_int,
+ raw_data: *mut core::ffi::c_void,
+ ) -> bindings::irqreturn_t {
+ // SAFETY: On registration, `into_pointer` was called, so it is safe to borrow from it here
+ // because `from_pointer` is called only after the irq is unregistered.
+ let data = unsafe { H::Data::borrow(raw_data) };
+ H::handle_threaded_irq(data) as _
+ }
+}
+
+/// The return value from interrupt handlers.
+pub enum Return {
+ /// The interrupt was not from this device or was not handled.
+ None = bindings::irqreturn_IRQ_NONE as _,
+
+ /// The interrupt was handled by this device.
+ Handled = bindings::irqreturn_IRQ_HANDLED as _,
+
+ /// The handler wants the handler thread to wake up.
+ WakeThread = bindings::irqreturn_IRQ_WAKE_THREAD as _,
+}
+
+/// Container for interrupt flags.
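+///
+/// Flags can be combined with the bitwise-or operator; for example (the combination shown is
+/// illustrative):
+///
+/// ```
+/// let _flags = kernel::irq::flags::SHARED | kernel::irq::flags::TRIGGER_RISING;
+/// ```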
+pub mod flags {
+ use crate::bindings;
+
+ /// Use the interrupt line as already configured.
+ pub const TRIGGER_NONE: usize = bindings::IRQF_TRIGGER_NONE as _;
+
+ /// The interrupt is triggered when the signal goes from low to high.
+ pub const TRIGGER_RISING: usize = bindings::IRQF_TRIGGER_RISING as _;
+
+ /// The interrupt is triggered when the signal goes from high to low.
+ pub const TRIGGER_FALLING: usize = bindings::IRQF_TRIGGER_FALLING as _;
+
+ /// The interrupt is triggered while the signal is held high.
+ pub const TRIGGER_HIGH: usize = bindings::IRQF_TRIGGER_HIGH as _;
+
+ /// The interrupt is triggered while the signal is held low.
+ pub const TRIGGER_LOW: usize = bindings::IRQF_TRIGGER_LOW as _;
+
+ /// Allow sharing the irq among several devices.
+ pub const SHARED: usize = bindings::IRQF_SHARED as _;
+
+ /// Set by callers when they expect sharing mismatches to occur.
+ pub const PROBE_SHARED: usize = bindings::IRQF_PROBE_SHARED as _;
+
+ /// Flag to mark this interrupt as timer interrupt.
+ pub const TIMER: usize = bindings::IRQF_TIMER as _;
+
+ /// Interrupt is per cpu.
+ pub const PERCPU: usize = bindings::IRQF_PERCPU as _;
+
+ /// Flag to exclude this interrupt from irq balancing.
+ pub const NOBALANCING: usize = bindings::IRQF_NOBALANCING as _;
+
+ /// Interrupt is used for polling (only the interrupt that is registered first in a shared
+ /// interrupt is considered for performance reasons).
+ pub const IRQPOLL: usize = bindings::IRQF_IRQPOLL as _;
+
+ /// Interrupt is not reenabled after the hardirq handler finished. Used by threaded interrupts
+ /// which need to keep the irq line disabled until the threaded handler has been run.
+ pub const ONESHOT: usize = bindings::IRQF_ONESHOT as _;
+
+ /// Do not disable this IRQ during suspend. Does not guarantee that this interrupt will wake
+ /// the system from a suspended state.
+ pub const NO_SUSPEND: usize = bindings::IRQF_NO_SUSPEND as _;
+
+ /// Force enable it on resume even if [`NO_SUSPEND`] is set.
+ pub const FORCE_RESUME: usize = bindings::IRQF_FORCE_RESUME as _;
+
+ /// Interrupt cannot be threaded.
+ pub const NO_THREAD: usize = bindings::IRQF_NO_THREAD as _;
+
+ /// Resume IRQ early during syscore instead of at device resume time.
+ pub const EARLY_RESUME: usize = bindings::IRQF_EARLY_RESUME as _;
+
+ /// If the IRQ is shared with a NO_SUSPEND user, execute this interrupt handler after
+ /// suspending interrupts. For system wakeup devices users need to implement wakeup detection
+ /// in their interrupt handlers.
+ pub const COND_SUSPEND: usize = bindings::IRQF_COND_SUSPEND as _;
+
+ /// Don't enable IRQ or NMI automatically when users request it. Users will enable it
+ /// explicitly by `enable_irq` or `enable_nmi` later.
+ pub const NO_AUTOEN: usize = bindings::IRQF_NO_AUTOEN as _;
+
+    /// Exclude from runaway detection for IPI and similar handlers, depends on `PERCPU`.
+ pub const NO_DEBUG: usize = bindings::IRQF_NO_DEBUG as _;
+}
+
+/// A guard to call `chained_irq_exit` after `chained_irq_enter` was called.
+///
+/// It also serves as evidence that `chained_irq_enter` was previously called, so there are no
+/// public constructors and an instance is only created after `chained_irq_enter` has in fact
+/// been called.
+pub struct ChainedGuard<'a> {
+ desc: &'a Descriptor,
+ irq_chip: *mut bindings::irq_chip,
+}
+
+impl Drop for ChainedGuard<'_> {
+ fn drop(&mut self) {
+ // SAFETY: The lifetime of `ChainedGuard` guarantees that `self.desc` remains valid, so it
+        // also guarantees `irq_chip` (which was returned from it) and `self.desc.ptr` (guaranteed
+ // by the type invariants).
+ unsafe { bindings::chained_irq_exit(self.irq_chip, self.desc.ptr) };
+ }
+}
+
+/// Wraps the kernel's `struct irq_domain`.
+///
+/// # Invariants
+///
+/// The pointer `Domain::ptr` is non-null and valid.
+#[cfg(CONFIG_IRQ_DOMAIN)]
+pub struct Domain {
+ ptr: *mut bindings::irq_domain,
+}
+
+#[cfg(CONFIG_IRQ_DOMAIN)]
+impl Domain {
+ /// Constructs a new `struct irq_domain` wrapper.
+ ///
+ /// # Safety
+ ///
+ /// The pointer `ptr` must be non-null and valid for the lifetime of the returned object.
+ pub(crate) unsafe fn from_ptr(ptr: *mut bindings::irq_domain) -> Self {
+ // INVARIANT: The safety requirements ensure the invariant.
+ Self { ptr }
+ }
+
+ /// Invokes the chained handler of the given hw irq of the given domain.
+ ///
+ /// It requires evidence that `chained_irq_enter` was called, which is done by passing a
+ /// `ChainedGuard` instance.
+ pub fn generic_handle_chained(&self, hwirq: u32, _guard: &ChainedGuard<'_>) {
+ // SAFETY: `ptr` is valid by the type invariants.
+ unsafe { bindings::generic_handle_domain_irq(self.ptr, hwirq) };
+ }
+}
+
+/// A high-level irq flow handler.
+pub trait FlowHandler {
+ /// The data associated with the handler.
+ type Data: PointerWrapper;
+
+ /// Implements the irq flow for the given descriptor.
+ fn handle_irq_flow(data: <Self::Data as PointerWrapper>::Borrowed<'_>, desc: &Descriptor);
+}
+
+/// Returns the raw irq flow handler corresponding to the (high-level) one defined in `T`.
+///
+/// # Safety
+///
+/// The caller must ensure that the value stored in the irq handler data (as returned by
+/// `irq_desc_get_handler_data`) is the result of calling [`PointerWrapper::into_pointer`] for the
+/// [`T::Data`] type.
+pub(crate) unsafe fn new_flow_handler<T: FlowHandler>() -> bindings::irq_flow_handler_t {
+ Some(irq_flow_handler::<T>)
+}
+
+unsafe extern "C" fn irq_flow_handler<T: FlowHandler>(desc: *mut bindings::irq_desc) {
+ // SAFETY: By the safety requirements of `new_flow_handler`, we know that the value returned by
+ // `irq_desc_get_handler_data` comes from calling `T::Data::into_pointer`. `desc` is valid by
+ // the C API contract.
+ let data = unsafe { T::Data::borrow(bindings::irq_desc_get_handler_data(desc)) };
+
+ // SAFETY: The C API guarantees that `desc` is valid for the duration of this call, which
+    // outlives the lifetime of the `Descriptor` created from it below.
+ T::handle_irq_flow(data, &unsafe { Descriptor::from_ptr(desc) });
+}
diff --git a/rust/kernel/kasync.rs b/rust/kernel/kasync.rs
new file mode 100644
index 000000000000..d48e9041e804
--- /dev/null
+++ b/rust/kernel/kasync.rs
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel async functionality.
+
+use core::{
+ future::Future,
+ pin::Pin,
+ task::{Context, Poll},
+};
+
+pub mod executor;
+#[cfg(CONFIG_NET)]
+pub mod net;
+
+/// Yields execution of the current task so that other tasks may execute.
+///
+/// The task continues to be in a "runnable" state though, so it will eventually run again.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::kasync::yield_now;
+///
+/// async fn example() {
+/// pr_info!("Before yield\n");
+/// yield_now().await;
+/// pr_info!("After yield\n");
+/// }
+/// ```
+pub fn yield_now() -> impl Future<Output = ()> {
+ struct Yield {
+ first_poll: bool,
+ }
+
+ impl Future for Yield {
+ type Output = ();
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+ if !self.first_poll {
+ Poll::Ready(())
+ } else {
+ self.first_poll = false;
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ }
+
+ Yield { first_poll: true }
+}
diff --git a/rust/kernel/kasync/executor.rs b/rust/kernel/kasync/executor.rs
new file mode 100644
index 000000000000..e8e55dcfdf35
--- /dev/null
+++ b/rust/kernel/kasync/executor.rs
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel support for executing futures.
+
+use crate::{
+ sync::{LockClassKey, Ref, RefBorrow},
+ types::PointerWrapper,
+ Result,
+};
+use core::{
+ future::Future,
+ task::{RawWaker, RawWakerVTable, Waker},
+};
+
+pub mod workqueue;
+
+/// Spawns a new task to run in the given executor.
+///
+/// It also automatically defines a new lockdep lock class for executors (e.g., workqueue) that
+/// require one.
+#[macro_export]
+macro_rules! spawn_task {
+ ($executor:expr, $task:expr) => {{
+ static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ $crate::kasync::executor::Executor::spawn($executor, &CLASS, $task)
+ }};
+}
+
+/// A task spawned in an executor.
+pub trait Task {
+ /// Synchronously stops the task.
+ ///
+ /// It ensures that the task won't run again and releases resources needed to run the task
+ /// (e.g., the closure is dropped). If the task is inflight, it waits for the task to block or
+ /// complete before cleaning up and returning.
+ ///
+ /// Callers must not call this from within the task itself as it will likely lead to a
+ /// deadlock.
+ fn sync_stop(self: Ref<Self>);
+}
+
+/// An environment for executing tasks.
+pub trait Executor: Sync + Send {
+ /// Starts executing a task defined by the given future.
+ ///
+ /// Callers are encouraged to use the [`spawn_task`] macro because it automatically defines a
+ /// new lock class key.
+ fn spawn(
+ self: RefBorrow<'_, Self>,
+ lock_class_key: &'static LockClassKey,
+ future: impl Future + 'static + Send,
+ ) -> Result<Ref<dyn Task>>
+ where
+ Self: Sized;
+
+ /// Stops the executor.
+ ///
+ /// After it is called, attempts to spawn new tasks will result in an error and existing ones
+ /// won't be polled anymore.
+ fn stop(&self);
+}
+
+/// A waker that is wrapped in [`Ref`] for its reference counting.
+///
+/// Types that implement this trait can get a [`Waker`] by calling [`ref_waker`].
+pub trait RefWake: Send + Sync {
+ /// Wakes a task up.
+ fn wake_by_ref(self: RefBorrow<'_, Self>);
+
+ /// Wakes a task up and consumes a reference.
+ fn wake(self: Ref<Self>) {
+ self.as_ref_borrow().wake_by_ref();
+ }
+}
+
+/// Creates a [`Waker`] from a [`Ref<T>`], where `T` implements the [`RefWake`] trait.
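+///
+/// # Examples
+///
+/// A sketch of a wake source; `MyWaker` and its empty `wake_by_ref` body are illustrative
+/// placeholders:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::kasync::executor::{ref_waker, RefWake};
+/// use kernel::sync::{Ref, RefBorrow};
+///
+/// struct MyWaker;
+///
+/// impl RefWake for MyWaker {
+///     fn wake_by_ref(self: RefBorrow<'_, Self>) {
+///         // A real implementation would schedule its task to be polled again.
+///     }
+/// }
+///
+/// fn example() -> Result {
+///     let waker = ref_waker(Ref::try_new(MyWaker)?);
+///     waker.wake_by_ref();
+///     Ok(())
+/// }
+/// ```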
+pub fn ref_waker<T: 'static + RefWake>(w: Ref<T>) -> Waker {
+ fn raw_waker<T: 'static + RefWake>(w: Ref<T>) -> RawWaker {
+ let data = w.into_pointer();
+ RawWaker::new(
+ data.cast(),
+ &RawWakerVTable::new(clone::<T>, wake::<T>, wake_by_ref::<T>, drop::<T>),
+ )
+ }
+
+ unsafe fn clone<T: 'static + RefWake>(ptr: *const ()) -> RawWaker {
+ // SAFETY: The data stored in the raw waker is the result of a call to `into_pointer`.
+ let w = unsafe { Ref::<T>::borrow(ptr.cast()) };
+ raw_waker(w.into())
+ }
+
+ unsafe fn wake<T: 'static + RefWake>(ptr: *const ()) {
+ // SAFETY: The data stored in the raw waker is the result of a call to `into_pointer`.
+ let w = unsafe { Ref::<T>::from_pointer(ptr.cast()) };
+ w.wake();
+ }
+
+ unsafe fn wake_by_ref<T: 'static + RefWake>(ptr: *const ()) {
+ // SAFETY: The data stored in the raw waker is the result of a call to `into_pointer`.
+ let w = unsafe { Ref::<T>::borrow(ptr.cast()) };
+ w.wake_by_ref();
+ }
+
+ unsafe fn drop<T: 'static + RefWake>(ptr: *const ()) {
+ // SAFETY: The data stored in the raw waker is the result of a call to `into_pointer`.
+ unsafe { Ref::<T>::from_pointer(ptr.cast()) };
+ }
+
+ let raw = raw_waker(w);
+    // SAFETY: The vtable of the raw waker satisfies the behaviour requirements of a waker.
+ unsafe { Waker::from_raw(raw) }
+}
+
+/// A handle to an executor that automatically stops it on drop.
+pub struct AutoStopHandle<T: Executor + ?Sized> {
+ executor: Option<Ref<T>>,
+}
+
+impl<T: Executor + ?Sized> AutoStopHandle<T> {
+ /// Creates a new instance of an [`AutoStopHandle`].
+ pub fn new(executor: Ref<T>) -> Self {
+ Self {
+ executor: Some(executor),
+ }
+ }
+
+ /// Detaches from the auto-stop handle.
+ ///
+    /// That is, it extracts the executor from the handle, which will then no longer be stopped
+    /// when the handle is dropped.
+ pub fn detach(mut self) -> Ref<T> {
+ self.executor.take().unwrap()
+ }
+
+ /// Returns the executor associated with the auto-stop handle.
+ ///
+ /// This is so that callers can, for example, spawn new tasks.
+ pub fn executor(&self) -> RefBorrow<'_, T> {
+ self.executor.as_ref().unwrap().as_ref_borrow()
+ }
+}
+
+impl<T: Executor + ?Sized> Drop for AutoStopHandle<T> {
+ fn drop(&mut self) {
+ if let Some(ex) = self.executor.take() {
+ ex.stop();
+ }
+ }
+}
+
+impl<T: 'static + Executor> From<AutoStopHandle<T>> for AutoStopHandle<dyn Executor> {
+ fn from(src: AutoStopHandle<T>) -> Self {
+ Self::new(src.detach())
+ }
+}
diff --git a/rust/kernel/kasync/executor/workqueue.rs b/rust/kernel/kasync/executor/workqueue.rs
new file mode 100644
index 000000000000..81cd16880600
--- /dev/null
+++ b/rust/kernel/kasync/executor/workqueue.rs
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel support for executing futures in C workqueues (`struct workqueue_struct`).
+
+use super::{AutoStopHandle, RefWake};
+use crate::{
+ error::code::*,
+ mutex_init,
+ revocable::AsyncRevocable,
+ sync::{LockClassKey, Mutex, Ref, RefBorrow, UniqueRef},
+ unsafe_list,
+ workqueue::{BoxedQueue, Queue, Work, WorkAdapter},
+ Either, Left, Result, Right,
+};
+use core::{cell::UnsafeCell, future::Future, marker::PhantomPinned, pin::Pin, task::Context};
+
+trait RevocableTask {
+ fn revoke(&self);
+ fn flush(self: Ref<Self>);
+ fn to_links(&self) -> &unsafe_list::Links<dyn RevocableTask>;
+}
+
+// SAFETY: `Task` has a single `links` field and only one adapter.
+unsafe impl unsafe_list::Adapter for dyn RevocableTask {
+ type EntryType = dyn RevocableTask;
+ fn to_links(obj: &dyn RevocableTask) -> &unsafe_list::Links<dyn RevocableTask> {
+ obj.to_links()
+ }
+}
+
+struct Task<T: 'static + Send + Future> {
+ links: unsafe_list::Links<dyn RevocableTask>,
+ executor: Ref<Executor>,
+ work: Work,
+ future: AsyncRevocable<UnsafeCell<T>>,
+}
+
+// SAFETY: The `future` field is only used by one thread at a time (in the `poll` method, which is
+// called by the work queue, which guarantees no reentrancy), so a task is `Sync` as long as the
+// future is `Send`.
+unsafe impl<T: 'static + Send + Future> Sync for Task<T> {}
+
+// SAFETY: If the future `T` is `Send`, so is the task.
+unsafe impl<T: 'static + Send + Future> Send for Task<T> {}
+
+impl<T: 'static + Send + Future> Task<T> {
+ fn try_new(
+ executor: Ref<Executor>,
+ key: &'static LockClassKey,
+ future: T,
+ ) -> Result<Ref<Self>> {
+ let task = UniqueRef::try_new(Self {
+ executor: executor.clone(),
+ links: unsafe_list::Links::new(),
+ // SAFETY: `work` is initialised below.
+ work: unsafe { Work::new() },
+ future: AsyncRevocable::new(UnsafeCell::new(future)),
+ })?;
+
+ Work::init(&task, key);
+
+ let task = Ref::from(task);
+
+ // Add task to list.
+ {
+ let mut guard = executor.inner.lock();
+ if guard.stopped {
+ return Err(EINVAL);
+ }
+
+ // Convert one reference into a pointer so that we hold on to a ref count while the
+ // task is in the list.
+ Ref::into_raw(task.clone());
+
+ // SAFETY: The task was just created, so it is not in any other lists. It remains alive
+ // because we incremented the refcount to account for it being in the list. It never
+ // moves because it's pinned behind a `Ref`.
+ unsafe { guard.tasks.push_back(&*task) };
+ }
+
+ Ok(task)
+ }
+}
+
+unsafe impl<T: 'static + Send + Future> WorkAdapter for Task<T> {
+ type Target = Self;
+ const FIELD_OFFSET: isize = crate::offset_of!(Self, work);
+ fn run(task: Ref<Task<T>>) {
+ let waker = super::ref_waker(task.clone());
+ let mut ctx = Context::from_waker(&waker);
+
+ let guard = if let Some(g) = task.future.try_access() {
+ g
+ } else {
+ return;
+ };
+
+ // SAFETY: `future` is pinned when the task is. The task is pinned because it's behind a
+ // `Ref`, which is always pinned.
+ //
+ // Work queues guarantee no reentrancy and this is the only place where the future is
+ // dereferenced, so it's ok to do it mutably.
+ let future = unsafe { Pin::new_unchecked(&mut *guard.get()) };
+ if future.poll(&mut ctx).is_ready() {
+ drop(guard);
+ task.revoke();
+ }
+ }
+}
+
+impl<T: 'static + Send + Future> super::Task for Task<T> {
+ fn sync_stop(self: Ref<Self>) {
+ self.revoke();
+ self.flush();
+ }
+}
+
+impl<T: 'static + Send + Future> RevocableTask for Task<T> {
+ fn revoke(&self) {
+ if !self.future.revoke() {
+ // Nothing to do if the task was already revoked.
+ return;
+ }
+
+ // SAFETY: The object is inserted into the list on creation and only removed when the
+ // future is first revoked. (Subsequent revocations don't result in additional attempts
+ // to remove per the check above.)
+ unsafe { self.executor.inner.lock().tasks.remove(self) };
+
+ // Decrement the refcount now that the task is no longer in the list.
+ //
+ // SAFETY: `into_raw` was called from `try_new` when the task was added to the list.
+ unsafe { Ref::from_raw(self) };
+ }
+
+ fn flush(self: Ref<Self>) {
+ self.work.cancel();
+ }
+
+ fn to_links(&self) -> &unsafe_list::Links<dyn RevocableTask> {
+ &self.links
+ }
+}
+
+impl<T: 'static + Send + Future> RefWake for Task<T> {
+ fn wake(self: Ref<Self>) {
+ if self.future.is_revoked() {
+ return;
+ }
+
+ match &self.executor.queue {
+ Left(q) => &**q,
+ Right(q) => *q,
+ }
+ .enqueue(self.clone());
+ }
+
+ fn wake_by_ref(self: RefBorrow<'_, Self>) {
+ Ref::from(self).wake();
+ }
+}
+
+struct ExecutorInner {
+ stopped: bool,
+ tasks: unsafe_list::List<dyn RevocableTask>,
+}
+
+/// An executor backed by a work queue.
+///
+/// # Examples
+///
+/// The following example runs two tasks on the shared system workqueue.
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::kasync::executor::workqueue::Executor;
+/// use kernel::spawn_task;
+/// use kernel::workqueue;
+///
+/// fn example_shared_workqueue() -> Result {
+/// let mut handle = Executor::try_new(workqueue::system())?;
+/// spawn_task!(handle.executor(), async {
+/// pr_info!("First workqueue task\n");
+/// })?;
+/// spawn_task!(handle.executor(), async {
+/// pr_info!("Second workqueue task\n");
+/// })?;
+/// handle.detach();
+/// Ok(())
+/// }
+///
+/// # example_shared_workqueue().unwrap();
+/// ```
+pub struct Executor {
+ queue: Either<BoxedQueue, &'static Queue>,
+ inner: Mutex<ExecutorInner>,
+ _pin: PhantomPinned,
+}
+
+// SAFETY: The executor is backed by a kernel `struct workqueue_struct`, which works from any
+// thread.
+unsafe impl Send for Executor {}
+
+// SAFETY: The executor is backed by a kernel `struct workqueue_struct`, which can be used
+// concurrently by multiple threads.
+unsafe impl Sync for Executor {}
+
+impl Executor {
+ /// Creates a new workqueue-based executor using a static work queue.
+ pub fn try_new(wq: &'static Queue) -> Result<AutoStopHandle<Self>> {
+ Self::new_internal(Right(wq))
+ }
+
+ /// Creates a new workqueue-based executor using an owned (boxed) work queue.
+ pub fn try_new_owned(wq: BoxedQueue) -> Result<AutoStopHandle<Self>> {
+ Self::new_internal(Left(wq))
+ }
+
+ /// Creates a new workqueue-based executor.
+ ///
+ /// It uses the given work queue to run its tasks.
+ fn new_internal(queue: Either<BoxedQueue, &'static Queue>) -> Result<AutoStopHandle<Self>> {
+ let mut e = Pin::from(UniqueRef::try_new(Self {
+ queue,
+ _pin: PhantomPinned,
+ // SAFETY: `mutex_init` is called below.
+ inner: unsafe {
+ Mutex::new(ExecutorInner {
+ stopped: false,
+ tasks: unsafe_list::List::new(),
+ })
+ },
+ })?);
+        // SAFETY: `inner` is pinned when the executor is.
+ let pinned = unsafe { e.as_mut().map_unchecked_mut(|e| &mut e.inner) };
+ mutex_init!(pinned, "Executor::inner");
+
+ Ok(AutoStopHandle::new(e.into()))
+ }
+}
+
+impl super::Executor for Executor {
+ fn spawn(
+ self: RefBorrow<'_, Self>,
+ key: &'static LockClassKey,
+ future: impl Future + 'static + Send,
+ ) -> Result<Ref<dyn super::Task>> {
+ let task = Task::try_new(self.into(), key, future)?;
+ task.clone().wake();
+ Ok(task)
+ }
+
+ fn stop(&self) {
+ // Set the `stopped` flag.
+ self.inner.lock().stopped = true;
+
+ // Go through all tasks and revoke & flush them.
+ //
+ // N.B. If we decide to allow "asynchronous" stops, we need to ensure that tasks that have
+ // been revoked but not flushed yet remain in the list so that we can flush them here.
+        // Otherwise we may have a race where a running task (revoked while running) is no longer
+        // in the list, so we think we've synchronously stopped all tasks
+ // when we haven't really -- unloading a module in this situation leads to memory safety
+ // issues (running unloaded code).
+ loop {
+ let guard = self.inner.lock();
+
+ let front = if let Some(t) = guard.tasks.front() {
+ t
+ } else {
+ break;
+ };
+
+ // Get a new reference to the task.
+ //
+ // SAFETY: We know all entries in the list are of type `Ref<dyn RevocableTask>` and
+ // that a reference exists while the entry is in the list, and since we are holding the
+ // list lock, we know it cannot go away. The `into_raw` call below ensures that we
+ // don't decrement the refcount accidentally.
+ let tasktmp = unsafe { Ref::<dyn RevocableTask>::from_raw(front.as_ptr()) };
+ let task = tasktmp.clone();
+ Ref::into_raw(tasktmp);
+
+ // Release the mutex before revoking the task.
+ drop(guard);
+
+ task.revoke();
+ task.flush();
+ }
+ }
+}
diff --git a/rust/kernel/kasync/net.rs b/rust/kernel/kasync/net.rs
new file mode 100644
index 000000000000..b4bbeffad94a
--- /dev/null
+++ b/rust/kernel/kasync/net.rs
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Async networking.
+
+use crate::{bindings, error::code::*, net, sync::NoWaitLock, types::Opaque, Result};
+use core::{
+ future::Future,
+ marker::{PhantomData, PhantomPinned},
+ ops::Deref,
+ pin::Pin,
+ task::{Context, Poll, Waker},
+};
+
+/// A socket listening on a TCP port.
+///
+/// The [`TcpListener::accept`] method is meant to be used in async contexts.
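+///
+/// # Examples
+///
+/// A sketch of an accept loop meant to run as a task on an executor; the connection handling is
+/// left as a comment:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::kasync::net::TcpListener;
+/// async fn accept_loop(listener: TcpListener) -> Result {
+///     loop {
+///         let _stream = listener.accept().await?;
+///         // Handle the new connection, e.g. by spawning a task per stream.
+///     }
+/// }
+/// ```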
+pub struct TcpListener {
+ listener: net::TcpListener,
+}
+
+impl TcpListener {
+ /// Creates a new TCP listener.
+ ///
+ /// It is configured to listen on the given socket address for the given namespace.
+ pub fn try_new(ns: &net::Namespace, addr: &net::SocketAddr) -> Result<Self> {
+ Ok(Self {
+ listener: net::TcpListener::try_new(ns, addr)?,
+ })
+ }
+
+ /// Accepts a new connection.
+ ///
+ /// Returns a future that when ready indicates the result of the accept operation; on success,
+ /// it contains the newly-accepted tcp stream.
+ pub fn accept(&self) -> impl Future<Output = Result<TcpStream>> + '_ {
+ SocketFuture::from_listener(
+ self,
+ bindings::BINDINGS_EPOLLIN | bindings::BINDINGS_EPOLLERR,
+ || {
+ Ok(TcpStream {
+ stream: self.listener.accept(false)?,
+ })
+ },
+ )
+ }
+}
+
+impl Deref for TcpListener {
+ type Target = net::TcpListener;
+
+ fn deref(&self) -> &Self::Target {
+ &self.listener
+ }
+}
+
+/// A connected TCP socket.
+///
+/// The potentially blocking methods (e.g., [`TcpStream::read`], [`TcpStream::write`]) are meant
+/// to be used in async contexts.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::kasync::net::TcpStream;
+/// async fn echo_server(stream: TcpStream) -> Result {
+/// let mut buf = [0u8; 1024];
+/// loop {
+/// let n = stream.read(&mut buf).await?;
+/// if n == 0 {
+/// return Ok(());
+/// }
+/// stream.write_all(&buf[..n]).await?;
+/// }
+/// }
+/// ```
+pub struct TcpStream {
+ stream: net::TcpStream,
+}
+
+impl TcpStream {
+ /// Reads data from a connected socket.
+ ///
+ /// Returns a future that when ready indicates the result of the read operation; on success, it
+ /// contains the number of bytes read, which will be zero if the connection is closed.
+ pub fn read<'a>(&'a self, buf: &'a mut [u8]) -> impl Future<Output = Result<usize>> + 'a {
+ SocketFuture::from_stream(
+ self,
+ bindings::BINDINGS_EPOLLIN | bindings::BINDINGS_EPOLLHUP | bindings::BINDINGS_EPOLLERR,
+ || self.stream.read(buf, false),
+ )
+ }
+
+ /// Writes data to the connected socket.
+ ///
+ /// Returns a future that when ready indicates the result of the write operation; on success, it
+ /// contains the number of bytes written.
+ pub fn write<'a>(&'a self, buf: &'a [u8]) -> impl Future<Output = Result<usize>> + 'a {
+ SocketFuture::from_stream(
+ self,
+ bindings::BINDINGS_EPOLLOUT | bindings::BINDINGS_EPOLLHUP | bindings::BINDINGS_EPOLLERR,
+ || self.stream.write(buf, false),
+ )
+ }
+
+ /// Writes all the data to the connected socket.
+ ///
+ /// Returns a future that when ready indicates the result of the write operation; on success, it
+ /// has written all the data.
+ pub async fn write_all<'a>(&'a self, buf: &'a [u8]) -> Result {
+ let mut rem = buf;
+
+ while !rem.is_empty() {
+ let n = self.write(rem).await?;
+ rem = &rem[n..];
+ }
+
+ Ok(())
+ }
+}
+
+impl Deref for TcpStream {
+ type Target = net::TcpStream;
+
+ fn deref(&self) -> &Self::Target {
+ &self.stream
+ }
+}
+
+/// A future for a socket operation.
+///
+/// # Invariants
+///
+/// `sock` is always non-null and valid for the duration of the lifetime of the instance.
+struct SocketFuture<'a, Out, F: FnMut() -> Result<Out> + Send + 'a> {
+ sock: *mut bindings::socket,
+ mask: u32,
+ is_queued: bool,
+ wq_entry: Opaque<bindings::wait_queue_entry>,
+ waker: NoWaitLock<Option<Waker>>,
+ _p: PhantomData<&'a ()>,
+ _pin: PhantomPinned,
+ operation: F,
+}
+
+// SAFETY: A kernel socket can be used from any thread, `wq_entry` is only used on drop and when
+// `is_queued` is initially `false`.
+unsafe impl<Out, F: FnMut() -> Result<Out> + Send> Send for SocketFuture<'_, Out, F> {}
+
+impl<'a, Out, F: FnMut() -> Result<Out> + Send + 'a> SocketFuture<'a, Out, F> {
+ /// Creates a new socket future.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `sock` is non-null, valid, and remains valid for the lifetime
+ /// (`'a`) of the returned instance.
+ unsafe fn new(sock: *mut bindings::socket, mask: u32, operation: F) -> Self {
+ Self {
+ sock,
+ mask,
+ is_queued: false,
+ wq_entry: Opaque::uninit(),
+ waker: NoWaitLock::new(None),
+ operation,
+ _p: PhantomData,
+ _pin: PhantomPinned,
+ }
+ }
+
+ /// Creates a new socket future for a tcp listener.
+ fn from_listener(listener: &'a TcpListener, mask: u32, operation: F) -> Self {
+ // SAFETY: The socket is guaranteed to remain valid because it is bound to the reference to
+ // the listener (whose existence guarantees the socket remains valid).
+ unsafe { Self::new(listener.listener.sock, mask, operation) }
+ }
+
+ /// Creates a new socket future for a tcp stream.
+ fn from_stream(stream: &'a TcpStream, mask: u32, operation: F) -> Self {
+ // SAFETY: The socket is guaranteed to remain valid because it is bound to the reference to
+ // the stream (whose existence guarantees the socket remains valid).
+ unsafe { Self::new(stream.stream.sock, mask, operation) }
+ }
+
+ /// Callback called when the socket changes state.
+ ///
+ /// If the state matches the one we're waiting on, we wake up the task so that the future can be
+ /// polled again.
+ unsafe extern "C" fn wake_callback(
+ wq_entry: *mut bindings::wait_queue_entry,
+ _mode: core::ffi::c_uint,
+ _flags: core::ffi::c_int,
+ key: *mut core::ffi::c_void,
+ ) -> core::ffi::c_int {
+ let mask = key as u32;
+
+ // SAFETY: The future is valid while this callback is called because we remove from the
+ // queue on drop.
+ //
+ // There is a potential soundness issue here because we're generating a shared reference to
+ // `Self` while `Self::poll` has a mutable (unique) reference. However, for `!Unpin` types
+ // (like `Self`), `&mut T` is treated as `*mut T` per
+ // <https://github.com/rust-lang/rust/issues/63818> -- so we avoid the unsoundness. Once a
+ // more definitive solution is available, we can change this to use it.
+ let s = unsafe { &*crate::container_of!(wq_entry, Self, wq_entry) };
+ if mask & s.mask == 0 {
+ // Nothing to do as this notification doesn't interest us.
+ return 0;
+ }
+
+ // If we can't acquire the waker lock, the waker is in the process of being modified. Our
+ // attempt to acquire the lock will be reported to the lock owner, so it will trigger the
+ // wake up.
+ if let Some(guard) = s.waker.try_lock() {
+ if let Some(ref w) = *guard {
+ let cloned = w.clone();
+ drop(guard);
+ cloned.wake();
+ return 1;
+ }
+ }
+ 0
+ }
+
+ /// Poll the future once.
+ ///
+ /// It calls the operation and converts `EAGAIN` errors into a pending state.
+ fn poll_once(self: Pin<&mut Self>) -> Poll<Result<Out>> {
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ match (this.operation)() {
+ Ok(s) => Poll::Ready(Ok(s)),
+ Err(e) => {
+ if e == EAGAIN {
+ Poll::Pending
+ } else {
+ Poll::Ready(Err(e))
+ }
+ }
+ }
+ }
+
+ /// Updates the waker stored in the future.
+ ///
+ /// It automatically triggers a wake up on races with the reactor.
+ fn set_waker(&self, waker: &Waker) {
+ if let Some(mut guard) = self.waker.try_lock() {
+ let old = core::mem::replace(&mut *guard, Some(waker.clone()));
+ let contention = guard.unlock();
+ drop(old);
+ if !contention {
+ return;
+ }
+ }
+
+ // We either couldn't store the waker because the existing one is being awakened, or the
+ // reactor tried to acquire the lock while we held it (contention). In either case, we just
+ // wake it up to ensure we don't miss any notification.
+ waker.wake_by_ref();
+ }
+}
+
+impl<Out, F: FnMut() -> Result<Out> + Send> Future for SocketFuture<'_, Out, F> {
+ type Output = Result<Out>;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ match self.as_mut().poll_once() {
+ Poll::Ready(r) => Poll::Ready(r),
+ Poll::Pending => {
+                // Store away the latest waker every time we return `Pending`.
+ self.set_waker(cx.waker());
+ if self.is_queued {
+                    // Nothing else to do as the waiter is already queued.
+ return Poll::Pending;
+ }
+
+ // SAFETY: We never move out of `this`.
+ let this = unsafe { self.as_mut().get_unchecked_mut() };
+
+ this.is_queued = true;
+
+ // SAFETY: `wq_entry` is valid for write.
+ unsafe {
+ bindings::init_waitqueue_func_entry(
+ this.wq_entry.get(),
+ Some(Self::wake_callback),
+ )
+ };
+
+ // SAFETY: `wq_entry` was just initialised above and is valid for read/write.
+ // By the type invariants, the socket is always valid.
+ unsafe {
+ bindings::add_wait_queue(
+ core::ptr::addr_of_mut!((*this.sock).wq.wait),
+ this.wq_entry.get(),
+ )
+ };
+
+ // If the future wasn't queued yet, we need to poll again in case it reached
+ // the desired state between the last poll and being queued (in which case we
+ // would have missed the notification).
+ self.poll_once()
+ }
+ }
+ }
+}
+
+impl<Out, F: FnMut() -> Result<Out> + Send> Drop for SocketFuture<'_, Out, F> {
+ fn drop(&mut self) {
+ if !self.is_queued {
+ return;
+ }
+
+ // SAFETY: `wq_entry` is initialised because `is_queued` is set to `true`, so it is valid
+ // for read/write. By the type invariants, the socket is always valid.
+ unsafe {
+ bindings::remove_wait_queue(
+ core::ptr::addr_of_mut!((*self.sock).wq.wait),
+ self.wq_entry.get(),
+ )
+ };
+ }
+}
diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
new file mode 100644
index 000000000000..5f3e102962c3
--- /dev/null
+++ b/rust/kernel/kunit.rs
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! KUnit-based macros for Rust unit tests.
+//!
+//! C header: [`include/kunit/test.h`](../../../../../include/kunit/test.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/dev-tools/kunit/index.html>
+
+/// Asserts that a boolean expression is `true` at runtime.
+///
+/// Public but hidden since it should only be used from generated tests.
+///
+/// Unlike the one in `core`, this one does not panic; instead, it is mapped to the KUnit
+/// facilities. See [`assert!`] for more details.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! kunit_assert {
+ ($test:expr, $cond:expr $(,)?) => {{
+ if !$cond {
+ #[repr(transparent)]
+ struct Location($crate::bindings::kunit_loc);
+
+ #[repr(transparent)]
+ struct UnaryAssert($crate::bindings::kunit_unary_assert);
+
+ // SAFETY: There is only a static instance and in that one the pointer field
+ // points to an immutable C string.
+ unsafe impl Sync for Location {}
+
+ // SAFETY: There is only a static instance and in that one the pointer field
+ // points to an immutable C string.
+ unsafe impl Sync for UnaryAssert {}
+
+ static FILE: &'static $crate::str::CStr = $crate::c_str!(core::file!());
+ static LOCATION: Location = Location($crate::bindings::kunit_loc {
+ file: FILE.as_char_ptr(),
+ line: core::line!() as i32,
+ });
+ static CONDITION: &'static $crate::str::CStr = $crate::c_str!(stringify!($cond));
+ static ASSERTION: UnaryAssert = UnaryAssert($crate::bindings::kunit_unary_assert {
+ assert: $crate::bindings::kunit_assert {
+ format: Some($crate::bindings::kunit_unary_assert_format),
+ },
+ condition: CONDITION.as_char_ptr(),
+ expected_true: true,
+ });
+
+ // SAFETY:
+ // - FFI call.
+ // - The `test` pointer is valid because this hidden macro should only be called by
+ // the generated documentation tests which forward the test pointer given by KUnit.
+ // - The string pointers (`file` and `condition`) point to null-terminated ones.
+ // - The function pointer (`format`) points to the proper function.
+ // - The pointers passed will remain valid since they point to statics.
+ // - The format string is allowed to be null.
+ // - There are, however, problems with this: first of all, this will end up stopping
+ // the thread, without running destructors. While that is problematic in itself,
+            //   it is considered UB to have what is effectively a forced foreign unwind
+ // with `extern "C"` ABI. One could observe the stack that is now gone from
+ // another thread. We should avoid pinning stack variables to prevent library UB,
+            //   too. For the moment, given that test failures are reported immediately before the
+ // next test runs, that test failures should be fixed and that KUnit is explicitly
+ // documented as not suitable for production environments, we feel it is reasonable.
+ unsafe {
+ $crate::bindings::kunit_do_failed_assertion(
+ $test,
+ core::ptr::addr_of!(LOCATION.0),
+ $crate::bindings::kunit_assert_type_KUNIT_ASSERTION,
+ core::ptr::addr_of!(ASSERTION.0.assert),
+ core::ptr::null(),
+ );
+ }
+ }
+ }};
+}
+
+/// Asserts that two expressions are equal to each other (using [`PartialEq`]).
+///
+/// Public but hidden since it should only be used from generated tests.
+///
+/// Unlike the one in `core`, this one does not panic; instead, it is mapped to the KUnit
+/// facilities. See [`assert!`] for more details.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! kunit_assert_eq {
+ ($test:expr, $left:expr, $right:expr $(,)?) => {{
+ // For the moment, we just forward to the expression assert because,
+ // for binary asserts, KUnit supports only a few types (e.g. integers).
+ $crate::kunit_assert!($test, $left == $right);
+ }};
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
new file mode 100644
index 000000000000..b55fe00761c2
--- /dev/null
+++ b/rust/kernel/lib.rs
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! The `kernel` crate.
+//!
+//! This crate contains the kernel APIs that have been ported or wrapped for
+//! usage by Rust code in the kernel and is shared by all of them.
+//!
+//! In other words, all the rest of the Rust code in the kernel (e.g. kernel
+//! modules written in Rust) depends on [`core`], [`alloc`] and this crate.
+//!
+//! If you need a kernel C API that is not ported or wrapped yet here, then
+//! do so first instead of bypassing this crate.
+
+#![no_std]
+#![feature(allocator_api)]
+#![feature(associated_type_defaults)]
+#![feature(coerce_unsized)]
+#![feature(const_mut_refs)]
+#![feature(const_ptr_offset_from)]
+#![feature(const_refs_to_cell)]
+#![feature(const_trait_impl)]
+#![feature(core_ffi_c)]
+#![feature(c_size_t)]
+#![feature(dispatch_from_dyn)]
+#![feature(doc_cfg)]
+#![feature(duration_constants)]
+#![feature(generic_associated_types)]
+#![feature(ptr_metadata)]
+#![feature(receiver_trait)]
+#![feature(unsize)]
+
+// Ensure conditional compilation based on the kernel configuration works;
+// otherwise we may silently break things like initcall handling.
+#[cfg(not(CONFIG_RUST))]
+compile_error!("Missing kernel configuration for conditional compilation");
+
+#[cfg(not(test))]
+#[cfg(not(testlib))]
+mod allocator;
+
+#[doc(hidden)]
+pub use bindings;
+
+pub use macros;
+
+#[cfg(CONFIG_ARM_AMBA)]
+pub mod amba;
+pub mod chrdev;
+#[cfg(CONFIG_COMMON_CLK)]
+pub mod clk;
+pub mod cred;
+pub mod delay;
+pub mod device;
+pub mod driver;
+pub mod error;
+pub mod file;
+pub mod fs;
+pub mod gpio;
+pub mod hwrng;
+pub mod irq;
+pub mod kasync;
+pub mod miscdev;
+pub mod mm;
+#[cfg(CONFIG_NET)]
+pub mod net;
+pub mod pages;
+pub mod power;
+pub mod revocable;
+pub mod security;
+pub mod str;
+pub mod task;
+pub mod workqueue;
+
+pub mod linked_list;
+mod raw_list;
+pub mod rbtree;
+pub mod unsafe_list;
+
+#[doc(hidden)]
+pub mod module_param;
+
+mod build_assert;
+pub mod prelude;
+pub mod print;
+pub mod random;
+mod static_assert;
+#[doc(hidden)]
+pub mod std_vendor;
+pub mod sync;
+
+#[cfg(any(CONFIG_SYSCTL, doc))]
+#[doc(cfg(CONFIG_SYSCTL))]
+pub mod sysctl;
+
+pub mod io_buffer;
+#[cfg(CONFIG_HAS_IOMEM)]
+pub mod io_mem;
+pub mod iov_iter;
+pub mod of;
+pub mod platform;
+mod types;
+pub mod user_ptr;
+
+#[cfg(CONFIG_KUNIT)]
+pub mod kunit;
+
+#[doc(hidden)]
+pub use build_error::build_error;
+
+pub use crate::error::{to_result, Error, Result};
+pub use crate::types::{
+ bit, bits_iter, ARef, AlwaysRefCounted, Bool, Either, Either::Left, Either::Right, False, Mode,
+ Opaque, PointerWrapper, ScopeGuard, True,
+};
+
+use core::marker::PhantomData;
+
+/// Page size defined in terms of the [`PAGE_SHIFT`] macro from C.
+///
+/// [`PAGE_SHIFT`]: ../../../include/asm-generic/page.h
+pub const PAGE_SIZE: usize = 1 << bindings::PAGE_SHIFT;
+
+/// Prefix to appear before log messages printed from within the kernel crate.
+const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
+
+/// The top level entrypoint to implementing a kernel module.
+///
+/// For any teardown or cleanup operations, your type may implement [`Drop`].
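+///
+/// # Examples
+///
+/// A minimal module sketch; `MyModule` is illustrative and the accompanying `module!` macro
+/// invocation that registers it is omitted:
+///
+/// ```
+/// use kernel::prelude::*;
+///
+/// struct MyModule;
+///
+/// impl kernel::Module for MyModule {
+///     fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+///         pr_info!("initialised\n");
+///         Ok(MyModule)
+///     }
+/// }
+/// ```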
+pub trait Module: Sized + Sync {
+ /// Called at module initialization time.
+ ///
+ /// Use this method to perform whatever setup or registration your module
+ /// should do.
+ ///
+ /// Equivalent to the `module_init` macro in the C API.
+ fn init(name: &'static str::CStr, module: &'static ThisModule) -> Result<Self>;
+}
+
+/// Equivalent to `THIS_MODULE` in the C API.
+///
+/// C header: `include/linux/export.h`
+pub struct ThisModule(*mut bindings::module);
+
+// SAFETY: `THIS_MODULE` may be used from all threads within a module.
+unsafe impl Sync for ThisModule {}
+
+impl ThisModule {
+ /// Creates a [`ThisModule`] given the `THIS_MODULE` pointer.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be equal to the right `THIS_MODULE`.
+ pub const unsafe fn from_ptr(ptr: *mut bindings::module) -> ThisModule {
+ ThisModule(ptr)
+ }
+
+ /// Locks the module parameters to access them.
+ ///
+ /// Returns a [`KParamGuard`] that will release the lock when dropped.
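+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch, assuming a `module: &'static ThisModule` reference is at hand (as
+    /// passed to [`Module::init`]):
+    ///
+    /// ```
+    /// # use kernel::prelude::*;
+    /// fn with_params(module: &'static ThisModule) {
+    ///     let _guard = module.kernel_param_lock();
+    ///     // Module parameters may be safely read or written while `_guard` is alive.
+    /// }
+    /// ```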
+ pub fn kernel_param_lock(&self) -> KParamGuard<'_> {
+ // SAFETY: `kernel_param_lock` will check if the pointer is null and
+ // use the built-in mutex in that case.
+ #[cfg(CONFIG_SYSFS)]
+ unsafe {
+ bindings::kernel_param_lock(self.0)
+ }
+
+ KParamGuard {
+ #[cfg(CONFIG_SYSFS)]
+ this_module: self,
+ phantom: PhantomData,
+ }
+ }
+}
+
+/// Scoped lock on the kernel parameters of [`ThisModule`].
+///
+/// Lock will be released when this struct is dropped.
+pub struct KParamGuard<'a> {
+ #[cfg(CONFIG_SYSFS)]
+ this_module: &'a ThisModule,
+ phantom: PhantomData<&'a ()>,
+}
+
+#[cfg(CONFIG_SYSFS)]
+impl<'a> Drop for KParamGuard<'a> {
+ fn drop(&mut self) {
+ // SAFETY: `kernel_param_lock` will check if the pointer is null and
+ // use the built-in mutex in that case. The existence of `self`
+ // guarantees that the lock is held.
+ unsafe { bindings::kernel_param_unlock(self.this_module.0) }
+ }
+}
+
+/// Calculates the offset of a field from the beginning of the struct it belongs to.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::prelude::*;
+/// # use kernel::offset_of;
+/// struct Test {
+/// a: u64,
+/// b: u32,
+/// }
+///
+/// assert_eq!(offset_of!(Test, b), 8);
+/// ```
+#[macro_export]
+macro_rules! offset_of {
+ ($type:ty, $($f:tt)*) => {{
+ let tmp = core::mem::MaybeUninit::<$type>::uninit();
+ let outer = tmp.as_ptr();
+ // To avoid warnings when nesting `unsafe` blocks.
+ #[allow(unused_unsafe)]
+ // SAFETY: The pointer is valid and aligned, just not initialised; `addr_of` ensures that
+ // we don't actually read from `outer` (which would be UB) nor create an intermediate
+ // reference.
+ let inner = unsafe { core::ptr::addr_of!((*outer).$($f)*) } as *const u8;
+ // To avoid warnings when nesting `unsafe` blocks.
+ #[allow(unused_unsafe)]
+ // SAFETY: The two pointers are within the same allocation block.
+ unsafe { inner.offset_from(outer as *const u8) }
+ }}
+}
+
+/// Produces a pointer to an object from a pointer to one of its fields.
+///
+/// # Safety
+///
+/// Callers must ensure that the pointer to the field is in fact a pointer to the specified field,
+/// as opposed to a pointer to another object of the same type. If this condition is not met,
+/// any dereference of the resulting pointer is UB.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::container_of;
+/// struct Test {
+/// a: u64,
+/// b: u32,
+/// }
+///
+/// let test = Test { a: 10, b: 20 };
+/// let b_ptr = &test.b;
+/// let test_alias = container_of!(b_ptr, Test, b);
+/// assert!(core::ptr::eq(&test, test_alias));
+/// ```
+#[macro_export]
+macro_rules! container_of {
+ ($ptr:expr, $type:ty, $($f:tt)*) => {{
+ let ptr = $ptr as *const _ as *const u8;
+ let offset = $crate::offset_of!($type, $($f)*);
+ ptr.wrapping_offset(-offset) as *const $type
+ }}
+}
+
+#[cfg(not(any(testlib, test)))]
+#[panic_handler]
+fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
+ pr_emerg!("{}\n", info);
+ // SAFETY: FFI call.
+ unsafe { bindings::BUG() };
+ // Bindgen currently does not recognize `__noreturn` so `BUG` returns `()`
+ // instead of `!`. See <https://github.com/rust-lang/rust-bindgen/issues/2094>.
+ loop {}
+}
diff --git a/rust/kernel/linked_list.rs b/rust/kernel/linked_list.rs
new file mode 100644
index 000000000000..3330edcc7ca8
--- /dev/null
+++ b/rust/kernel/linked_list.rs
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Linked lists.
+//!
+//! TODO: This module is a work in progress.
+
+use alloc::boxed::Box;
+use core::ptr::NonNull;
+
+pub use crate::raw_list::{Cursor, GetLinks, Links};
+use crate::{raw_list, raw_list::RawList, sync::Ref};
+
+// TODO: Use the one from `kernel::file_operations::PointerWrapper` instead.
+/// Wraps an object to be inserted in a linked list.
+pub trait Wrapper<T: ?Sized> {
+ /// Converts the wrapped object into a pointer that represents it.
+ fn into_pointer(self) -> NonNull<T>;
+
+ /// Converts the object back from the pointer representation.
+ ///
+ /// # Safety
+ ///
+ /// The passed pointer must come from a previous call to [`Wrapper::into_pointer()`].
+ unsafe fn from_pointer(ptr: NonNull<T>) -> Self;
+
+ /// Returns a reference to the wrapped object.
+ fn as_ref(&self) -> &T;
+}
+
+impl<T: ?Sized> Wrapper<T> for Box<T> {
+ fn into_pointer(self) -> NonNull<T> {
+ NonNull::new(Box::into_raw(self)).unwrap()
+ }
+
+ unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
+ unsafe { Box::from_raw(ptr.as_ptr()) }
+ }
+
+ fn as_ref(&self) -> &T {
+ AsRef::as_ref(self)
+ }
+}
+
+impl<T: ?Sized> Wrapper<T> for Ref<T> {
+ fn into_pointer(self) -> NonNull<T> {
+ NonNull::new(Ref::into_raw(self) as _).unwrap()
+ }
+
+ unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
+ // SAFETY: The safety requirements of `from_pointer` satisfy the ones from `Ref::from_raw`.
+ unsafe { Ref::from_raw(ptr.as_ptr() as _) }
+ }
+
+ fn as_ref(&self) -> &T {
+ AsRef::as_ref(self)
+ }
+}
+
+impl<T: ?Sized> Wrapper<T> for &T {
+ fn into_pointer(self) -> NonNull<T> {
+ NonNull::from(self)
+ }
+
+ unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
+ unsafe { &*ptr.as_ptr() }
+ }
+
+ fn as_ref(&self) -> &T {
+ self
+ }
+}
+
+/// A descriptor of wrapped list elements.
+pub trait GetLinksWrapped: GetLinks {
+ /// Specifies which wrapper (e.g., `Box` and `Arc`) wraps the list entries.
+ type Wrapped: Wrapper<Self::EntryType>;
+}
+
+impl<T: ?Sized> GetLinksWrapped for Box<T>
+where
+ Box<T>: GetLinks,
+{
+ type Wrapped = Box<<Box<T> as GetLinks>::EntryType>;
+}
+
+impl<T: GetLinks + ?Sized> GetLinks for Box<T> {
+ type EntryType = T::EntryType;
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
+ <T as GetLinks>::get_links(data)
+ }
+}
+
+impl<T: ?Sized> GetLinksWrapped for Ref<T>
+where
+ Ref<T>: GetLinks,
+{
+ type Wrapped = Ref<<Ref<T> as GetLinks>::EntryType>;
+}
+
+impl<T: GetLinks + ?Sized> GetLinks for Ref<T> {
+ type EntryType = T::EntryType;
+
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
+ <T as GetLinks>::get_links(data)
+ }
+}
+
+/// A linked list.
+///
+/// Elements in the list are wrapped and ownership is transferred to the list while the element is
+/// in the list.
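+///
+/// # Examples
+///
+/// A sketch of a list of boxed entries; `Item` is illustrative:
+///
+/// ```
+/// # use kernel::prelude::*;
+/// use kernel::linked_list::{GetLinks, Links, List};
+///
+/// struct Item {
+///     value: u32,
+///     links: Links<Item>,
+/// }
+///
+/// impl GetLinks for Item {
+///     type EntryType = Item;
+///     fn get_links(data: &Item) -> &Links<Item> {
+///         &data.links
+///     }
+/// }
+///
+/// fn example() -> Result {
+///     let mut list = List::<Box<Item>>::new();
+///     list.push_back(Box::try_new(Item { value: 42, links: Links::new() })?);
+///     assert_eq!(list.pop_front().map(|i| i.value), Some(42));
+///     Ok(())
+/// }
+/// ```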
+pub struct List<G: GetLinksWrapped> {
+ list: RawList<G>,
+}
+
+impl<G: GetLinksWrapped> List<G> {
+ /// Constructs a new empty linked list.
+ pub fn new() -> Self {
+ Self {
+ list: RawList::new(),
+ }
+ }
+
+ /// Returns whether the list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.list.is_empty()
+ }
+
+ /// Adds the given object to the end (back) of the list.
+ ///
+ /// The object is dropped if it is already on this (or another) list; this can happen for
+ /// reference-counted objects, where dropping means decrementing the reference count.
+ pub fn push_back(&mut self, data: G::Wrapped) {
+ let ptr = data.into_pointer();
+
+ // SAFETY: We took ownership of the entry, so it is safe to insert it.
+ if !unsafe { self.list.push_back(ptr.as_ref()) } {
+ // If insertion failed, rebuild object so that it can be freed.
+ // SAFETY: We just called `into_pointer` above.
+ unsafe { G::Wrapped::from_pointer(ptr) };
+ }
+ }
+
+ /// Inserts the given object after `existing`.
+ ///
+ /// The object is dropped if it is already on this (or another) list; this can happen for
+ /// reference-counted objects, where dropping means decrementing the reference count.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `existing` points to a valid entry that is on the list.
+ pub unsafe fn insert_after(&mut self, existing: NonNull<G::EntryType>, data: G::Wrapped) {
+ let ptr = data.into_pointer();
+ let entry = unsafe { &*existing.as_ptr() };
+ if unsafe { !self.list.insert_after(entry, ptr.as_ref()) } {
+ // If insertion failed, rebuild object so that it can be freed.
+ unsafe { G::Wrapped::from_pointer(ptr) };
+ }
+ }
+
+ /// Removes the given entry.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `data` is either on this list or in no list. It being on another
+ /// list leads to memory unsafety.
+ pub unsafe fn remove(&mut self, data: &G::Wrapped) -> Option<G::Wrapped> {
+ let entry_ref = Wrapper::as_ref(data);
+ if unsafe { self.list.remove(entry_ref) } {
+ Some(unsafe { G::Wrapped::from_pointer(NonNull::from(entry_ref)) })
+ } else {
+ None
+ }
+ }
+
+ /// Removes the element currently at the front of the list and returns it.
+ ///
+ /// Returns `None` if the list is empty.
+ pub fn pop_front(&mut self) -> Option<G::Wrapped> {
+ let front = self.list.pop_front()?;
+ // SAFETY: Elements on the list were inserted after a call to `into_pointer`.
+ Some(unsafe { G::Wrapped::from_pointer(front) })
+ }
+
+ /// Returns a cursor starting on the first (front) element of the list.
+ pub fn cursor_front(&self) -> Cursor<'_, G> {
+ self.list.cursor_front()
+ }
+
+ /// Returns a mutable cursor starting on the first (front) element of the list.
+ pub fn cursor_front_mut(&mut self) -> CursorMut<'_, G> {
+ CursorMut::new(self.list.cursor_front_mut())
+ }
+}
+
+impl<G: GetLinksWrapped> Default for List<G> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<G: GetLinksWrapped> Drop for List<G> {
+ fn drop(&mut self) {
+ while self.pop_front().is_some() {}
+ }
+}
+
+/// A list cursor that allows traversing a linked list and inspecting and mutating elements.
+pub struct CursorMut<'a, G: GetLinksWrapped> {
+ cursor: raw_list::CursorMut<'a, G>,
+}
+
+impl<'a, G: GetLinksWrapped> CursorMut<'a, G> {
+ fn new(cursor: raw_list::CursorMut<'a, G>) -> Self {
+ Self { cursor }
+ }
+
+ /// Returns the element the cursor is currently positioned on.
+ pub fn current(&mut self) -> Option<&mut G::EntryType> {
+ self.cursor.current()
+ }
+
+ /// Removes the element the cursor is currently positioned on.
+ ///
+ /// After removal, it advances the cursor to the next element.
+ pub fn remove_current(&mut self) -> Option<G::Wrapped> {
+ let ptr = self.cursor.remove_current()?;
+
+ // SAFETY: Elements on the list were inserted after a call to `into_pointer`.
+ Some(unsafe { G::Wrapped::from_pointer(ptr) })
+ }
+
+ /// Returns the element immediately after the one the cursor is positioned on.
+ pub fn peek_next(&mut self) -> Option<&mut G::EntryType> {
+ self.cursor.peek_next()
+ }
+
+ /// Returns the element immediately before the one the cursor is positioned on.
+ pub fn peek_prev(&mut self) -> Option<&mut G::EntryType> {
+ self.cursor.peek_prev()
+ }
+
+ /// Moves the cursor to the next element.
+ pub fn move_next(&mut self) {
+ self.cursor.move_next();
+ }
+}
diff --git a/rust/kernel/miscdev.rs b/rust/kernel/miscdev.rs
new file mode 100644
index 000000000000..65b95d6dba90
--- /dev/null
+++ b/rust/kernel/miscdev.rs
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Miscellaneous devices.
+//!
+//! C header: [`include/linux/miscdevice.h`](../../../../include/linux/miscdevice.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/driver-api/misc_devices.html>
+
+use crate::bindings;
+use crate::error::{code::*, Error, Result};
+use crate::file;
+use crate::{device, str::CStr, str::CString, ThisModule};
+use alloc::boxed::Box;
+use core::marker::PhantomPinned;
+use core::{fmt, mem::MaybeUninit, pin::Pin};
+
+/// Options which can be used to configure how a misc device is registered.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{c_str, device::RawDevice, file, miscdev, prelude::*};
+/// fn example(
+/// reg: Pin<&mut miscdev::Registration<impl file::Operations<OpenData = ()>>>,
+/// parent: &dyn RawDevice,
+/// ) -> Result {
+/// miscdev::Options::new()
+/// .mode(0o600)
+/// .minor(10)
+/// .parent(parent)
+/// .register(reg, fmt!("sample"), ())
+/// }
+/// ```
+#[derive(Default)]
+pub struct Options<'a> {
+ minor: Option<i32>,
+ mode: Option<u16>,
+ parent: Option<&'a dyn device::RawDevice>,
+}
+
+impl<'a> Options<'a> {
+ /// Creates a new [`Options`] instance with the required fields.
+ pub const fn new() -> Self {
+ Self {
+ minor: None,
+ mode: None,
+ parent: None,
+ }
+ }
+
+ /// Sets the minor device number.
+ pub const fn minor(&mut self, v: i32) -> &mut Self {
+ self.minor = Some(v);
+ self
+ }
+
+ /// Sets the device mode.
+ ///
+ /// This is usually an octal number and describes who can perform read/write/execute operations
+ /// on the device.
+ pub const fn mode(&mut self, m: u16) -> &mut Self {
+ self.mode = Some(m);
+ self
+ }
+
+ /// Sets the device parent.
+ pub const fn parent(&mut self, p: &'a dyn device::RawDevice) -> &mut Self {
+ self.parent = Some(p);
+ self
+ }
+
+ /// Registers a misc device using the configured options.
+ pub fn register<T: file::Operations>(
+ &self,
+ reg: Pin<&mut Registration<T>>,
+ name: fmt::Arguments<'_>,
+ open_data: T::OpenData,
+ ) -> Result {
+ reg.register_with_options(name, open_data, self)
+ }
+
+ /// Allocates a new registration of a misc device and completes the registration with the
+ /// configured options.
+ pub fn register_new<T: file::Operations>(
+ &self,
+ name: fmt::Arguments<'_>,
+ open_data: T::OpenData,
+ ) -> Result<Pin<Box<Registration<T>>>> {
+ let mut r = Pin::from(Box::try_new(Registration::new())?);
+ self.register(r.as_mut(), name, open_data)?;
+ Ok(r)
+ }
+}
+
+/// A registration of a miscellaneous device.
+///
+/// # Invariants
+///
+/// `open_data` is always initialised when `registered` is `true`, and not initialised otherwise.
+pub struct Registration<T: file::Operations> {
+ registered: bool,
+ mdev: bindings::miscdevice,
+ name: Option<CString>,
+ _pin: PhantomPinned,
+
+ /// Context initialised on construction and made available to all file instances on
+ /// [`file::Operations::open`].
+ open_data: MaybeUninit<T::OpenData>,
+}
+
+impl<T: file::Operations> Registration<T> {
+ /// Creates a new [`Registration`] but does not register it yet.
+ ///
+ /// It is allowed to move.
+ pub fn new() -> Self {
+ // INVARIANT: `registered` is `false` and `open_data` is not initialised.
+ Self {
+ registered: false,
+ mdev: bindings::miscdevice::default(),
+ name: None,
+ _pin: PhantomPinned,
+ open_data: MaybeUninit::uninit(),
+ }
+ }
+
+ /// Registers a miscellaneous device.
+ ///
+ /// Returns a pinned heap-allocated representation of the registration.
+ pub fn new_pinned(name: fmt::Arguments<'_>, open_data: T::OpenData) -> Result<Pin<Box<Self>>> {
+ Options::new().register_new(name, open_data)
+ }
+
+ /// Registers a miscellaneous device with the rest of the kernel.
+ ///
+ /// It must be pinned because the memory block that represents the registration is
+ /// self-referential.
+ pub fn register(
+ self: Pin<&mut Self>,
+ name: fmt::Arguments<'_>,
+ open_data: T::OpenData,
+ ) -> Result {
+ Options::new().register(self, name, open_data)
+ }
+
+ /// Registers a miscellaneous device with the rest of the kernel. Additional optional settings
+ /// are provided via the `opts` parameter.
+ ///
+ /// It must be pinned because the memory block that represents the registration is
+ /// self-referential.
+ pub fn register_with_options(
+ self: Pin<&mut Self>,
+ name: fmt::Arguments<'_>,
+ open_data: T::OpenData,
+ opts: &Options<'_>,
+ ) -> Result {
+ // SAFETY: We must ensure that we never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ if this.registered {
+ // Already registered.
+ return Err(EINVAL);
+ }
+
+ let name = CString::try_from_fmt(name)?;
+
+ // SAFETY: The adapter is compatible with `misc_register`.
+ this.mdev.fops = unsafe { file::OperationsVtable::<Self, T>::build() };
+ this.mdev.name = name.as_char_ptr();
+ this.mdev.minor = opts.minor.unwrap_or(bindings::MISC_DYNAMIC_MINOR as i32);
+ this.mdev.mode = opts.mode.unwrap_or(0);
+ this.mdev.parent = opts
+ .parent
+ .map_or(core::ptr::null_mut(), |p| p.raw_device());
+
+ // We write to `open_data` here because as soon as `misc_register` succeeds, the file can be
+ // opened, so we need `open_data` configured ahead of time.
+ //
+ // INVARIANT: `registered` is set to `true` and `open_data` is also initialised.
+ this.registered = true;
+ this.open_data.write(open_data);
+
+ // SAFETY: `this.mdev` was initialised above and, because `this` is pinned, it remains valid
+ // for the duration of the call.
+ let ret = unsafe { bindings::misc_register(&mut this.mdev) };
+ if ret < 0 {
+ // INVARIANT: `registered` is set back to `false` and `open_data` is destroyed.
+ this.registered = false;
+ // SAFETY: `open_data` was initialised a few lines above.
+ unsafe { this.open_data.assume_init_drop() };
+ return Err(Error::from_kernel_errno(ret));
+ }
+
+ this.name = Some(name);
+
+ Ok(())
+ }
+}
+
+impl<T: file::Operations> Default for Registration<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T: file::Operations> file::OpenAdapter<T::OpenData> for Registration<T> {
+ unsafe fn convert(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+ ) -> *const T::OpenData {
+ // SAFETY: The caller must guarantee that `file` is valid.
+ let reg = crate::container_of!(unsafe { (*file).private_data }, Self, mdev);
+
+ // SAFETY: This function is only called while the misc device is still registered, so the
+ // registration must be valid. Additionally, the type invariants guarantee that while the
+ // miscdev is registered, `open_data` is initialised.
+ unsafe { (*reg).open_data.as_ptr() }
+ }
+}
+
+// SAFETY: The only method is `register()`, which requires a (pinned) mutable `Registration`, so it
+// is safe to pass `&Registration` to multiple threads because it offers no interior mutability.
+unsafe impl<T: file::Operations> Sync for Registration<T> {}
+
+// SAFETY: All functions work from any thread. So as long as the `Registration::open_data` is
+// `Send`, so is `Registration<T>`.
+unsafe impl<T: file::Operations> Send for Registration<T> where T::OpenData: Send {}
+
+impl<T: file::Operations> Drop for Registration<T> {
+ /// Removes the registration from the kernel if it has completed successfully before.
+ fn drop(&mut self) {
+ if self.registered {
+ // SAFETY: `registered` being `true` indicates that a previous call to `misc_register`
+ // succeeded.
+ unsafe { bindings::misc_deregister(&mut self.mdev) };
+
+ // SAFETY: The type invariant guarantees that `open_data` is initialised when
+ // `registered` is `true`.
+ unsafe { self.open_data.assume_init_drop() };
+ }
+ }
+}
+
+/// Kernel module that exposes a single miscdev device implemented by `T`.
+pub struct Module<T: file::Operations<OpenData = ()>> {
+ _dev: Pin<Box<Registration<T>>>,
+}
+
+impl<T: file::Operations<OpenData = ()>> crate::Module for Module<T> {
+ fn init(name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ Ok(Self {
+ _dev: Registration::new_pinned(crate::fmt!("{name}"), ())?,
+ })
+ }
+}
+
+/// Declares a kernel module that exposes a single misc device.
+///
+/// The `type` argument should be a type which implements the [`file::Operations`] trait. Also
+/// accepts various forms of kernel metadata.
+///
+/// C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
+///
+/// # Examples
+///
+/// ```ignore
+/// use kernel::prelude::*;
+///
+/// module_misc_device! {
+/// type: MyFile,
+/// name: b"my_miscdev_kernel_module",
+/// author: b"Rust for Linux Contributors",
+/// description: b"My very own misc device kernel module!",
+/// license: b"GPL",
+/// }
+///
+/// #[derive(Default)]
+/// struct MyFile;
+///
+/// #[vtable]
+/// impl kernel::file::Operations for MyFile {}
+/// ```
+#[macro_export]
+macro_rules! module_misc_device {
+ (type: $type:ty, $($f:tt)*) => {
+ type ModuleType = kernel::miscdev::Module<$type>;
+ module! {
+ type: ModuleType,
+ $($f)*
+ }
+ }
+}
diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs
new file mode 100644
index 000000000000..8a69c69dddd9
--- /dev/null
+++ b/rust/kernel/mm.rs
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory management.
+//!
+//! C header: [`include/linux/mm.h`](../../../../include/linux/mm.h)
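+//!
+//! # Examples
+//!
+//! A sketch of how the flag helpers below might be combined (the `Area` is assumed to come from
+//! elsewhere, e.g. a driver's `mmap` callback; this is illustrative only):
+//!
+//! ```ignore
+//! use kernel::mm::virt;
+//!
+//! fn make_read_only(area: &mut virt::Area) {
+//! // Keep the existing flags but clear the write and may-write bits.
+//! let flags = area.flags() & !(virt::flags::WRITE | virt::flags::MAYWRITE);
+//! area.set_flags(flags);
+//! }
+//! ```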
+
+use crate::{bindings, pages, to_result, Result};
+
+/// Virtual memory.
+pub mod virt {
+ use super::*;
+
+ /// A wrapper for the kernel's `struct vm_area_struct`.
+ ///
+ /// It represents an area of virtual memory.
+ ///
+ /// # Invariants
+ ///
+ /// `vma` is always non-null and valid.
+ pub struct Area {
+ vma: *mut bindings::vm_area_struct,
+ }
+
+ impl Area {
+ /// Creates a new instance of a virtual memory area.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `vma` is non-null and valid for the duration of the new area's
+ /// lifetime.
+ pub(crate) unsafe fn from_ptr(vma: *mut bindings::vm_area_struct) -> Self {
+ // INVARIANTS: The safety requirements guarantee the invariants.
+ Self { vma }
+ }
+
+ /// Returns the flags associated with the virtual memory area.
+ ///
+ /// The possible flags are a combination of the constants in [`flags`].
+ pub fn flags(&self) -> usize {
+ // SAFETY: `self.vma` is valid by the type invariants.
+ unsafe { (*self.vma).vm_flags as _ }
+ }
+
+ /// Sets the flags associated with the virtual memory area.
+ ///
+ /// The possible flags are a combination of the constants in [`flags`].
+ pub fn set_flags(&mut self, flags: usize) {
+ // SAFETY: `self.vma` is valid by the type invariants.
+ unsafe { (*self.vma).vm_flags = flags as _ };
+ }
+
+ /// Returns the start address of the virtual memory area.
+ pub fn start(&self) -> usize {
+ // SAFETY: `self.vma` is valid by the type invariants.
+ unsafe { (*self.vma).vm_start as _ }
+ }
+
+ /// Returns the end address of the virtual memory area.
+ pub fn end(&self) -> usize {
+ // SAFETY: `self.vma` is valid by the type invariants.
+ unsafe { (*self.vma).vm_end as _ }
+ }
+
+ /// Maps a single page at the given address within the virtual memory area.
+ pub fn insert_page(&mut self, address: usize, page: &pages::Pages<0>) -> Result {
+ // SAFETY: The page is guaranteed to be order 0 by the type system. The range of
+ // `address` is already checked by `vm_insert_page`. `self.vma` and `page.pages` are
+ // guaranteed by their respective type invariants to be valid.
+ to_result(unsafe { bindings::vm_insert_page(self.vma, address as _, page.pages) })
+ }
+ }
+
+ /// Container for [`Area`] flags.
+ pub mod flags {
+ use crate::bindings;
+
+ /// No flags are set.
+ pub const NONE: usize = bindings::VM_NONE as _;
+
+ /// Mapping allows reads.
+ pub const READ: usize = bindings::VM_READ as _;
+
+ /// Mapping allows writes.
+ pub const WRITE: usize = bindings::VM_WRITE as _;
+
+ /// Mapping allows execution.
+ pub const EXEC: usize = bindings::VM_EXEC as _;
+
+ /// Mapping is shared.
+ pub const SHARED: usize = bindings::VM_SHARED as _;
+
+ /// Mapping may be updated to allow reads.
+ pub const MAYREAD: usize = bindings::VM_MAYREAD as _;
+
+ /// Mapping may be updated to allow writes.
+ pub const MAYWRITE: usize = bindings::VM_MAYWRITE as _;
+
+ /// Mapping may be updated to allow execution.
+ pub const MAYEXEC: usize = bindings::VM_MAYEXEC as _;
+
+ /// Mapping may be updated to be shared.
+ pub const MAYSHARE: usize = bindings::VM_MAYSHARE as _;
+
+ /// Do not copy this vma on fork.
+ pub const DONTCOPY: usize = bindings::VM_DONTCOPY as _;
+
+ /// Cannot expand with mremap().
+ pub const DONTEXPAND: usize = bindings::VM_DONTEXPAND as _;
+
+ /// Lock the pages covered when they are faulted in.
+ pub const LOCKONFAULT: usize = bindings::VM_LOCKONFAULT as _;
+
+ /// Is a VM accounted object.
+ pub const ACCOUNT: usize = bindings::VM_ACCOUNT as _;
+
+ /// Whether the VM should suppress accounting.
+ pub const NORESERVE: usize = bindings::VM_NORESERVE as _;
+
+ /// Huge TLB Page VM.
+ pub const HUGETLB: usize = bindings::VM_HUGETLB as _;
+
+ /// Synchronous page faults.
+ pub const SYNC: usize = bindings::VM_SYNC as _;
+
+ /// Architecture-specific flag.
+ pub const ARCH_1: usize = bindings::VM_ARCH_1 as _;
+
+ /// Wipe VMA contents in child.
+ pub const WIPEONFORK: usize = bindings::VM_WIPEONFORK as _;
+
+ /// Do not include in the core dump.
+ pub const DONTDUMP: usize = bindings::VM_DONTDUMP as _;
+
+ /// Not soft dirty clean area.
+ pub const SOFTDIRTY: usize = bindings::VM_SOFTDIRTY as _;
+
+ /// Can contain "struct page" and pure PFN pages.
+ pub const MIXEDMAP: usize = bindings::VM_MIXEDMAP as _;
+
+ /// MADV_HUGEPAGE marked this vma.
+ pub const HUGEPAGE: usize = bindings::VM_HUGEPAGE as _;
+
+ /// MADV_NOHUGEPAGE marked this vma.
+ pub const NOHUGEPAGE: usize = bindings::VM_NOHUGEPAGE as _;
+
+ /// KSM may merge identical pages.
+ pub const MERGEABLE: usize = bindings::VM_MERGEABLE as _;
+ }
+}
diff --git a/rust/kernel/module_param.rs b/rust/kernel/module_param.rs
new file mode 100644
index 000000000000..6df38c78c65c
--- /dev/null
+++ b/rust/kernel/module_param.rs
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Types for module parameters.
+//!
+//! C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
+
+use crate::error::{code::*, from_kernel_result};
+use crate::str::{CStr, Formatter};
+use core::fmt::Write;
+
+/// Types that can be used for module parameters.
+///
+/// Note that displaying the type in `sysfs` will fail if
+/// [`alloc::string::ToString::to_string`] (as implemented through the
+/// [`core::fmt::Display`] trait) writes more than [`PAGE_SIZE`]
+/// bytes (including an additional null terminator).
+///
+/// [`PAGE_SIZE`]: `crate::PAGE_SIZE`
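+///
+/// # Examples
+///
+/// A sketch of how a parameter of one of these types is typically declared through the
+/// [`macros::module`] macro (the module name, default and description are assumptions made for
+/// illustration):
+///
+/// ```ignore
+/// module! {
+/// type: MyModule,
+/// name: b"my_module",
+/// license: b"GPL",
+/// params: {
+/// my_i32: i32 {
+/// default: 42,
+/// permissions: 0o644,
+/// description: b"Example of an i32 parameter",
+/// },
+/// },
+/// }
+/// ```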
+pub trait ModuleParam: core::fmt::Display + core::marker::Sized {
+ /// The `ModuleParam` will be used by the kernel module through this type.
+ ///
+ /// This may differ from `Self` if, for example, `Self` needs to track
+ /// ownership without exposing it or allocate extra space for other possible
+ /// parameter values. See [`StringParam`] or [`ArrayParam`] for examples.
+ type Value: ?Sized;
+
+ /// Whether the parameter is allowed to be set without an argument.
+ ///
+ /// Setting this to `true` allows the parameter to be passed without an
+ /// argument (e.g. just `module.param` instead of `module.param=foo`).
+ const NOARG_ALLOWED: bool;
+
+ /// Convert a parameter argument into the parameter value.
+ ///
+ /// `None` should be returned when parsing of the argument fails.
+ /// `arg == None` indicates that the parameter was passed without an
+ /// argument. If `NOARG_ALLOWED` is set to `false` then `arg` is guaranteed
+ /// to always be `Some(_)`.
+ ///
+ /// Parameters passed at boot time will be set before [`kmalloc`] is
+ /// available (even if the module is loaded at a later time). However, in
+ /// this case, the argument buffer will be valid for the entire lifetime of
+ /// the kernel. So implementations of this method which need to allocate
+ /// should first check that the allocator is available (with
+ /// [`crate::bindings::slab_is_available`]) and when it is not available
+ /// provide an alternative implementation which doesn't allocate. In cases
+ /// where the allocator is not available it is safe to save references to
+ /// `arg` in `Self`, but in other cases a copy should be made.
+ ///
+ /// [`kmalloc`]: ../../../include/linux/slab.h
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self>;
+
+ /// Get the current value of the parameter for use in the kernel module.
+ ///
+ /// This function should not be used directly. Instead use the wrapper
+ /// `read` which will be generated by [`macros::module`].
+ fn value(&self) -> &Self::Value;
+
+ /// Set the module parameter from a string.
+ ///
+ /// Used to set the parameter value when loading the module or when set
+ /// through `sysfs`.
+ ///
+ /// # Safety
+ ///
+ /// If `val` is non-null then it must point to a valid null-terminated
+ /// string. The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn set_param(
+ val: *const core::ffi::c_char,
+ param: *const crate::bindings::kernel_param,
+ ) -> core::ffi::c_int {
+ let arg = if val.is_null() {
+ None
+ } else {
+ Some(unsafe { CStr::from_char_ptr(val).as_bytes() })
+ };
+ match Self::try_from_param_arg(arg) {
+ Some(new_value) => {
+ let old_value = unsafe { (*param).__bindgen_anon_1.arg as *mut Self };
+ let _ = unsafe { core::ptr::replace(old_value, new_value) };
+ 0
+ }
+ None => EINVAL.to_kernel_errno(),
+ }
+ }
+
+ /// Write a string representation of the current parameter value to `buf`.
+ ///
+ /// Used for displaying the current parameter value in `sysfs`.
+ ///
+ /// # Safety
+ ///
+ /// `buf` must be a buffer of length at least `kernel::PAGE_SIZE` that is
+ /// writeable. The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn get_param(
+ buf: *mut core::ffi::c_char,
+ param: *const crate::bindings::kernel_param,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: The C contract guarantees that the buffer is at least `PAGE_SIZE` bytes.
+ let mut f = unsafe { Formatter::from_buffer(buf.cast(), crate::PAGE_SIZE) };
+ unsafe { write!(f, "{}\0", *((*param).__bindgen_anon_1.arg as *mut Self)) }?;
+ Ok(f.bytes_written().try_into()?)
+ }
+ }
+
+ /// Drop the parameter.
+ ///
+ /// Called when unloading a module.
+ ///
+ /// # Safety
+ ///
+ /// The `arg` field of `param` must be an instance of `Self`.
+ unsafe extern "C" fn free(arg: *mut core::ffi::c_void) {
+ unsafe { core::ptr::drop_in_place(arg as *mut Self) };
+ }
+}
+
+/// Trait for parsing integers.
+///
+/// Strings beginning with `0x`, `0o`, or `0b` are parsed as hex, octal, or
+/// binary respectively. Other strings beginning with `0` are parsed as
+/// octal. Anything else is parsed as decimal. A leading `+` or `-` is also
+/// permitted. Any string parsed by [`kstrtol()`] or [`kstrtoul()`] will be
+/// successfully parsed.
+///
+/// [`kstrtol()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtol
+/// [`kstrtoul()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtoul
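+///
+/// # Examples
+///
+/// A sketch of the accepted forms, following the rules above (the expected values are worked out
+/// from those rules rather than taken from a test in this patch):
+///
+/// ```ignore
+/// assert_eq!(<i32 as ParseInt>::from_str("42"), Some(42));
+/// assert_eq!(<i32 as ParseInt>::from_str("0x2a"), Some(42)); // hexadecimal
+/// assert_eq!(<i32 as ParseInt>::from_str("052"), Some(42)); // leading `0` means octal
+/// assert_eq!(<i32 as ParseInt>::from_str("0b101010"), Some(42)); // binary
+/// assert_eq!(<i32 as ParseInt>::from_str("-42"), Some(-42));
+/// assert_eq!(<i32 as ParseInt>::from_str(""), None);
+/// ```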
+trait ParseInt: Sized {
+ fn from_str_radix(src: &str, radix: u32) -> Result<Self, core::num::ParseIntError>;
+ fn checked_neg(self) -> Option<Self>;
+
+ fn from_str_unsigned(src: &str) -> Result<Self, core::num::ParseIntError> {
+ let (radix, digits) = if let Some(n) = src.strip_prefix("0x") {
+ (16, n)
+ } else if let Some(n) = src.strip_prefix("0X") {
+ (16, n)
+ } else if let Some(n) = src.strip_prefix("0o") {
+ (8, n)
+ } else if let Some(n) = src.strip_prefix("0O") {
+ (8, n)
+ } else if let Some(n) = src.strip_prefix("0b") {
+ (2, n)
+ } else if let Some(n) = src.strip_prefix("0B") {
+ (2, n)
+ } else if src.starts_with('0') {
+ (8, src)
+ } else {
+ (10, src)
+ };
+ Self::from_str_radix(digits, radix)
+ }
+
+ fn from_str(src: &str) -> Option<Self> {
+ match src.bytes().next() {
+ None => None,
+ Some(b'-') => Self::from_str_unsigned(&src[1..]).ok()?.checked_neg(),
+ Some(b'+') => Some(Self::from_str_unsigned(&src[1..]).ok()?),
+ Some(_) => Some(Self::from_str_unsigned(src).ok()?),
+ }
+ }
+}
+
+macro_rules! impl_parse_int {
+ ($ty:ident) => {
+ impl ParseInt for $ty {
+ fn from_str_radix(src: &str, radix: u32) -> Result<Self, core::num::ParseIntError> {
+ $ty::from_str_radix(src, radix)
+ }
+
+ fn checked_neg(self) -> Option<Self> {
+ self.checked_neg()
+ }
+ }
+ };
+}
+
+impl_parse_int!(i8);
+impl_parse_int!(u8);
+impl_parse_int!(i16);
+impl_parse_int!(u16);
+impl_parse_int!(i32);
+impl_parse_int!(u32);
+impl_parse_int!(i64);
+impl_parse_int!(u64);
+impl_parse_int!(isize);
+impl_parse_int!(usize);
+
+macro_rules! impl_module_param {
+ ($ty:ident) => {
+ impl ModuleParam for $ty {
+ type Value = $ty;
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ let bytes = arg?;
+ let utf8 = core::str::from_utf8(bytes).ok()?;
+ <$ty as crate::module_param::ParseInt>::from_str(utf8)
+ }
+
+ fn value(&self) -> &Self::Value {
+ self
+ }
+ }
+ };
+}
+
+#[doc(hidden)]
+#[macro_export]
+/// Generate a static [`kernel_param_ops`](../../../include/linux/moduleparam.h) struct.
+///
+/// # Examples
+///
+/// ```ignore
+/// make_param_ops!(
+/// /// Documentation for new param ops.
+/// PARAM_OPS_MYTYPE, // Name for the static.
+/// MyType // A type which implements [`ModuleParam`].
+/// );
+/// ```
+macro_rules! make_param_ops {
+ ($ops:ident, $ty:ty) => {
+ $crate::make_param_ops!(
+ #[doc=""]
+ $ops,
+ $ty
+ );
+ };
+ ($(#[$meta:meta])* $ops:ident, $ty:ty) => {
+ $(#[$meta])*
+ ///
+ /// Static [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// struct generated by [`make_param_ops`].
+ pub static $ops: $crate::bindings::kernel_param_ops = $crate::bindings::kernel_param_ops {
+ flags: if <$ty as $crate::module_param::ModuleParam>::NOARG_ALLOWED {
+ $crate::bindings::KERNEL_PARAM_OPS_FL_NOARG
+ } else {
+ 0
+ },
+ set: Some(<$ty as $crate::module_param::ModuleParam>::set_param),
+ get: Some(<$ty as $crate::module_param::ModuleParam>::get_param),
+ free: Some(<$ty as $crate::module_param::ModuleParam>::free),
+ };
+ };
+}
+
+impl_module_param!(i8);
+impl_module_param!(u8);
+impl_module_param!(i16);
+impl_module_param!(u16);
+impl_module_param!(i32);
+impl_module_param!(u32);
+impl_module_param!(i64);
+impl_module_param!(u64);
+impl_module_param!(isize);
+impl_module_param!(usize);
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i8`].
+ PARAM_OPS_I8,
+ i8
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u8`].
+ PARAM_OPS_U8,
+ u8
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i16`].
+ PARAM_OPS_I16,
+ i16
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u16`].
+ PARAM_OPS_U16,
+ u16
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i32`].
+ PARAM_OPS_I32,
+ i32
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u32`].
+ PARAM_OPS_U32,
+ u32
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`i64`].
+ PARAM_OPS_I64,
+ i64
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`u64`].
+ PARAM_OPS_U64,
+ u64
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`isize`].
+ PARAM_OPS_ISIZE,
+ isize
+);
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`usize`].
+ PARAM_OPS_USIZE,
+ usize
+);
+
+impl ModuleParam for bool {
+ type Value = bool;
+
+ const NOARG_ALLOWED: bool = true;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ match arg {
+ None => Some(true),
+ Some(b"y") | Some(b"Y") | Some(b"1") | Some(b"true") => Some(true),
+ Some(b"n") | Some(b"N") | Some(b"0") | Some(b"false") => Some(false),
+ _ => None,
+ }
+ }
+
+ fn value(&self) -> &Self::Value {
+ self
+ }
+}
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`bool`].
+ PARAM_OPS_BOOL,
+ bool
+);
+
+/// An array of at __most__ `N` values.
+///
+/// # Invariant
+///
+/// The first `self.used` elements of `self.values` are initialized.
+pub struct ArrayParam<T, const N: usize> {
+ values: [core::mem::MaybeUninit<T>; N],
+ used: usize,
+}
+
+impl<T, const N: usize> ArrayParam<T, { N }> {
+ fn values(&self) -> &[T] {
+ // SAFETY: The invariant maintained by `ArrayParam` allows us to cast
+ // the first `self.used` elements to `T`.
+ unsafe {
+ &*(&self.values[0..self.used] as *const [core::mem::MaybeUninit<T>] as *const [T])
+ }
+ }
+}
+
+impl<T: Copy, const N: usize> ArrayParam<T, { N }> {
+ const fn new() -> Self {
+ // INVARIANT: The first `self.used` elements of `self.values` are
+ // initialized.
+ ArrayParam {
+ values: [core::mem::MaybeUninit::uninit(); N],
+ used: 0,
+ }
+ }
+
+ const fn push(&mut self, val: T) {
+ if self.used < N {
+ // INVARIANT: The first `self.used` elements of `self.values` are
+ // initialized.
+ self.values[self.used] = core::mem::MaybeUninit::new(val);
+ self.used += 1;
+ }
+ }
+
+ /// Create an instance of `ArrayParam` initialized with `vals`.
+ ///
+ /// This function is only meant to be used by the [`macros::module`] macro.
+ pub const fn create(vals: &[T]) -> Self {
+ let mut result = ArrayParam::new();
+ let mut i = 0;
+ while i < vals.len() {
+ result.push(vals[i]);
+ i += 1;
+ }
+ result
+ }
+}
+
+impl<T: core::fmt::Display, const N: usize> core::fmt::Display for ArrayParam<T, { N }> {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ for val in self.values() {
+ write!(f, "{},", val)?;
+ }
+ Ok(())
+ }
+}
+
+impl<T: Copy + core::fmt::Display + ModuleParam, const N: usize> ModuleParam
+ for ArrayParam<T, { N }>
+{
+ type Value = [T];
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ arg.and_then(|args| {
+ let mut result = Self::new();
+ for arg in args.split(|b| *b == b',') {
+ result.push(T::try_from_param_arg(Some(arg))?);
+ }
+ Some(result)
+ })
+ }
+
+ fn value(&self) -> &Self::Value {
+ self.values()
+ }
+}
+
+/// A C-style string parameter.
+///
+/// The Rust version of the [`charp`] parameter. This type is meant to be
+/// used by the [`macros::module`] macro, not handled directly. Instead use the
+/// `read` method generated by that macro.
+///
+/// [`charp`]: ../../../include/linux/moduleparam.h
+pub enum StringParam {
+ /// A borrowed parameter value.
+ ///
+ /// Either the default value (which is static in the module) or borrowed
+ /// from the original argument buffer used to set the value.
+ Ref(&'static [u8]),
+
+ /// A value that was allocated when the parameter was set.
+ ///
+ /// The value needs to be freed when the parameter is reset or the module is
+ /// unloaded.
+ Owned(alloc::vec::Vec<u8>),
+}
+
+impl StringParam {
+ fn bytes(&self) -> &[u8] {
+ match self {
+ StringParam::Ref(bytes) => *bytes,
+ StringParam::Owned(vec) => &vec[..],
+ }
+ }
+}
+
+impl core::fmt::Display for StringParam {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ let bytes = self.bytes();
+ match core::str::from_utf8(bytes) {
+ Ok(utf8) => write!(f, "{}", utf8),
+ Err(_) => write!(f, "{:?}", bytes),
+ }
+ }
+}
+
+impl ModuleParam for StringParam {
+ type Value = [u8];
+
+ const NOARG_ALLOWED: bool = false;
+
+ fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
+ // SAFETY: It is always safe to call [`slab_is_available`](../../../include/linux/slab.h).
+ let slab_available = unsafe { crate::bindings::slab_is_available() };
+ arg.and_then(|arg| {
+ if slab_available {
+ let mut vec = alloc::vec::Vec::new();
+ vec.try_extend_from_slice(arg).ok()?;
+ Some(StringParam::Owned(vec))
+ } else {
+ Some(StringParam::Ref(arg))
+ }
+ })
+ }
+
+ fn value(&self) -> &Self::Value {
+ self.bytes()
+ }
+}
+
+make_param_ops!(
+ /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
+ /// for [`StringParam`].
+ PARAM_OPS_STR,
+ StringParam
+);
diff --git a/rust/kernel/net.rs b/rust/kernel/net.rs
new file mode 100644
index 000000000000..0115f3a35cd0
--- /dev/null
+++ b/rust/kernel/net.rs
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Networking core.
+//!
+//! C headers: [`include/net/net_namespace.h`](../../../../include/net/net_namespace.h),
+//! [`include/linux/netdevice.h`](../../../../include/linux/netdevice.h),
+//! [`include/linux/skbuff.h`](../../../../include/linux/skbuff.h).
+
+use crate::{bindings, str::CStr, to_result, ARef, AlwaysRefCounted, Error, Result};
+use core::{cell::UnsafeCell, ptr::NonNull};
+
+#[cfg(CONFIG_NETFILTER)]
+pub mod filter;
+
+/// Wraps the kernel's `struct net_device`.
+#[repr(transparent)]
+pub struct Device(UnsafeCell<bindings::net_device>);
+
+// SAFETY: Instances of `Device` are created on the C side. They are always refcounted.
+unsafe impl AlwaysRefCounted for Device {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::dev_hold(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::dev_put(obj.cast().as_ptr()) };
+ }
+}
+
+/// Wraps the kernel's `struct net`.
+#[repr(transparent)]
+pub struct Namespace(UnsafeCell<bindings::net>);
+
+impl Namespace {
+ /// Finds a network device with the given name in the namespace.
+ pub fn dev_get_by_name(&self, name: &CStr) -> Option<ARef<Device>> {
+ // SAFETY: The existence of a shared reference guarantees the refcount is nonzero.
+ let ptr =
+ NonNull::new(unsafe { bindings::dev_get_by_name(self.0.get(), name.as_char_ptr()) })?;
+ Some(unsafe { ARef::from_raw(ptr.cast()) })
+ }
+}
+
+// SAFETY: Instances of `Namespace` are created on the C side. They are always refcounted.
+unsafe impl AlwaysRefCounted for Namespace {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_net(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::put_net(obj.cast().as_ptr()) };
+ }
+}
+
+/// Returns the network namespace for the `init` process.
+pub fn init_ns() -> &'static Namespace {
+ // SAFETY: `init_net` is a global static, so it is never deallocated and the reference is
+ // valid for the `'static` lifetime; the cast is ok because `Namespace` is transparent over
+ // `bindings::net`.
+ unsafe { &*core::ptr::addr_of!(bindings::init_net).cast() }
+}
+
+/// Wraps the kernel's `struct sk_buff`.
+#[repr(transparent)]
+pub struct SkBuff(UnsafeCell<bindings::sk_buff>);
+
+impl SkBuff {
+ /// Creates a reference to an [`SkBuff`] from a valid pointer.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
+ /// returned [`SkBuff`] instance.
+ pub unsafe fn from_ptr<'a>(ptr: *const bindings::sk_buff) -> &'a SkBuff {
+ // SAFETY: The safety requirements guarantee the validity of the dereference, while the
+ // `SkBuff` type being transparent makes the cast ok.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Returns the remaining data in the buffer's first segment.
+ pub fn head_data(&self) -> &[u8] {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ let headlen = unsafe { bindings::skb_headlen(self.0.get()) };
+ let len = headlen.try_into().unwrap_or(usize::MAX);
+ // SAFETY: The existence of a shared reference means `self.0` is valid.
+ let data = unsafe { core::ptr::addr_of!((*self.0.get()).data).read() };
+ // SAFETY: The `struct sk_buff` conventions guarantee that at least `skb_headlen(skb)` bytes
+ // are valid from `skb->data`.
+ unsafe { core::slice::from_raw_parts(data, len) }
+ }
+
+ /// Returns the total length of the data (in all segments) in the skb.
+ #[allow(clippy::len_without_is_empty)]
+ pub fn len(&self) -> u32 {
+ // SAFETY: The existence of a shared reference means `self.0` is valid.
+ unsafe { core::ptr::addr_of!((*self.0.get()).len).read() }
+ }
+}
+
+// SAFETY: Instances of `SkBuff` are created on the C side. They are always refcounted.
+unsafe impl AlwaysRefCounted for SkBuff {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::skb_get(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe {
+ bindings::kfree_skb_reason(
+ obj.cast().as_ptr(),
+ bindings::skb_drop_reason_SKB_DROP_REASON_NOT_SPECIFIED,
+ )
+ };
+ }
+}
+
+/// An IPv4 address.
+///
+/// This is equivalent to C's `in_addr`.
+#[repr(transparent)]
+pub struct Ipv4Addr(bindings::in_addr);
+
+impl Ipv4Addr {
+ /// A wildcard IPv4 address.
+ ///
+ /// Binding to this address means binding to all IPv4 addresses.
+ pub const ANY: Self = Self::new(0, 0, 0, 0);
+
+ /// The IPv4 loopback address.
+ pub const LOOPBACK: Self = Self::new(127, 0, 0, 1);
+
+ /// The IPv4 broadcast address.
+ pub const BROADCAST: Self = Self::new(255, 255, 255, 255);
+
+ /// Creates a new IPv4 address with the given components.
+ pub const fn new(a: u8, b: u8, c: u8, d: u8) -> Self {
+ Self(bindings::in_addr {
+ s_addr: u32::from_be_bytes([a, b, c, d]).to_be(),
+ })
+ }
+}
+
+/// An IPv6 address.
+///
+/// This is equivalent to C's `in6_addr`.
+#[repr(transparent)]
+pub struct Ipv6Addr(bindings::in6_addr);
+
+impl Ipv6Addr {
+ /// A wildcard IPv6 address.
+ ///
+ /// Binding to this address means binding to all IPv6 addresses.
+ pub const ANY: Self = Self::new(0, 0, 0, 0, 0, 0, 0, 0);
+
+ /// The IPv6 loopback address.
+ pub const LOOPBACK: Self = Self::new(0, 0, 0, 0, 0, 0, 0, 1);
+
+ /// Creates a new IPv6 address with the given components.
+ #[allow(clippy::too_many_arguments)]
+ pub const fn new(a: u16, b: u16, c: u16, d: u16, e: u16, f: u16, g: u16, h: u16) -> Self {
+ Self(bindings::in6_addr {
+ in6_u: bindings::in6_addr__bindgen_ty_1 {
+ u6_addr16: [
+ a.to_be(),
+ b.to_be(),
+ c.to_be(),
+ d.to_be(),
+ e.to_be(),
+ f.to_be(),
+ g.to_be(),
+ h.to_be(),
+ ],
+ },
+ })
+ }
+}
+
+/// A socket address.
+///
+/// It's an enum with either an IPv4 or IPv6 socket address.
+pub enum SocketAddr {
+ /// An IPv4 socket address.
+ V4(SocketAddrV4),
+
+ /// An IPv6 socket address.
+ V6(SocketAddrV6),
+}
+
+/// An IPv4 socket address.
+///
+/// This is equivalent to C's `sockaddr_in`.
+#[repr(transparent)]
+pub struct SocketAddrV4(bindings::sockaddr_in);
+
+impl SocketAddrV4 {
+ /// Creates a new IPv4 socket address.
+ pub const fn new(addr: Ipv4Addr, port: u16) -> Self {
+ Self(bindings::sockaddr_in {
+ sin_family: bindings::AF_INET as _,
+ sin_port: port.to_be(),
+ sin_addr: addr.0,
+ __pad: [0; 8],
+ })
+ }
+}
+
+/// An IPv6 socket address.
+///
+/// This is equivalent to C's `sockaddr_in6`.
+#[repr(transparent)]
+pub struct SocketAddrV6(bindings::sockaddr_in6);
+
+impl SocketAddrV6 {
+ /// Creates a new IPv6 socket address.
+ pub const fn new(addr: Ipv6Addr, port: u16, flowinfo: u32, scopeid: u32) -> Self {
+ Self(bindings::sockaddr_in6 {
+ sin6_family: bindings::AF_INET6 as _,
+ sin6_port: port.to_be(),
+ sin6_addr: addr.0,
+ sin6_flowinfo: flowinfo,
+ sin6_scope_id: scopeid,
+ })
+ }
+}
+
+/// A socket listening on a TCP port.
+///
+/// # Invariants
+///
+/// The socket pointer is always non-null and valid.
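+///
+/// # Examples
+///
+/// A sketch of a blocking, one-shot echo on port 8080 in the `init` namespace (the function and
+/// its port number are assumptions made for illustration; error handling is minimal):
+///
+/// ```ignore
+/// use kernel::net::{self, Ipv4Addr, SocketAddr, SocketAddrV4, TcpListener};
+/// use kernel::prelude::*;
+///
+/// fn echo_once() -> Result {
+/// let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::ANY, 8080));
+/// let listener = TcpListener::try_new(net::init_ns(), &addr)?;
+/// // Block until a client connects, then echo back whatever fits in one read.
+/// let stream = listener.accept(true)?;
+/// let mut buf = [0u8; 64];
+/// let n = stream.read(&mut buf, true)?;
+/// stream.write(&buf[..n], true)?;
+/// Ok(())
+/// }
+/// ```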
+pub struct TcpListener {
+ pub(crate) sock: *mut bindings::socket,
+}
+
+// SAFETY: `TcpListener` is just a wrapper for a kernel socket, which can be used from any thread.
+unsafe impl Send for TcpListener {}
+
+// SAFETY: `TcpListener` is just a wrapper for a kernel socket, which can be used from any thread.
+unsafe impl Sync for TcpListener {}
+
+impl TcpListener {
+ /// Creates a new TCP listener.
+ ///
+ /// It is configured to listen on the given socket address for the given namespace.
+ pub fn try_new(ns: &Namespace, addr: &SocketAddr) -> Result<Self> {
+ let mut socket = core::ptr::null_mut();
+ let (pf, addr, addrlen) = match addr {
+ SocketAddr::V4(addr) => (
+ bindings::PF_INET,
+ addr as *const _ as _,
+ core::mem::size_of::<bindings::sockaddr_in>(),
+ ),
+ SocketAddr::V6(addr) => (
+ bindings::PF_INET6,
+ addr as *const _ as _,
+ core::mem::size_of::<bindings::sockaddr_in6>(),
+ ),
+ };
+
+ // SAFETY: The namespace is valid and the output socket pointer is valid for write.
+ to_result(unsafe {
+ bindings::sock_create_kern(
+ ns.0.get(),
+ pf as _,
+ bindings::sock_type_SOCK_STREAM as _,
+ bindings::IPPROTO_TCP as _,
+ &mut socket,
+ )
+ })?;
+
+ // INVARIANT: The socket was just created, so it is valid.
+ let listener = Self { sock: socket };
+
+ // SAFETY: The type invariant guarantees that the socket is valid, and `addr` and `addrlen`
+ // were initialised based on valid values provided in the address enum.
+ to_result(unsafe { bindings::kernel_bind(socket, addr, addrlen as _) })?;
+
+ // SAFETY: The socket is valid per the type invariant.
+ to_result(unsafe { bindings::kernel_listen(socket, bindings::SOMAXCONN as _) })?;
+
+ Ok(listener)
+ }
+
+ /// Accepts a new connection.
+ ///
+ /// On success, returns the newly-accepted socket stream.
+ ///
+ /// If no connection is available to be accepted, one of two behaviours will occur:
+ /// - If `block` is `false`, returns [`crate::error::code::EAGAIN`];
+ /// - If `block` is `true`, blocks until an error occurs or some connection can be accepted.
+ pub fn accept(&self, block: bool) -> Result<TcpStream> {
+ let mut new = core::ptr::null_mut();
+ let flags = if block { 0 } else { bindings::O_NONBLOCK };
+ // SAFETY: The type invariant guarantees that the socket is valid, and the output argument
+ // is also valid for write.
+ to_result(unsafe { bindings::kernel_accept(self.sock, &mut new, flags as _) })?;
+ Ok(TcpStream { sock: new })
+ }
+}
+
+impl Drop for TcpListener {
+ fn drop(&mut self) {
+ // SAFETY: The type invariant guarantees that the socket is valid.
+ unsafe { bindings::sock_release(self.sock) };
+ }
+}
+
+/// A connected TCP socket.
+///
+/// # Invariants
+///
+/// The socket pointer is always non-null and valid.
+pub struct TcpStream {
+ pub(crate) sock: *mut bindings::socket,
+}
+
+// SAFETY: `TcpStream` is just a wrapper for a kernel socket, which can be used from any thread.
+unsafe impl Send for TcpStream {}
+
+// SAFETY: `TcpStream` is just a wrapper for a kernel socket, which can be used from any thread.
+unsafe impl Sync for TcpStream {}
+
+impl TcpStream {
+ /// Reads data from a connected socket.
+ ///
+ /// On success, returns the number of bytes read, which will be zero if the connection is
+ /// closed.
+ ///
+ /// If no data is immediately available for reading, one of two behaviours will occur:
+ /// - If `block` is `false`, returns [`crate::error::code::EAGAIN`];
+ /// - If `block` is `true`, blocks until an error occurs, the connection is closed, or some
+ /// data becomes readable.
+ pub fn read(&self, buf: &mut [u8], block: bool) -> Result<usize> {
+ let mut msg = bindings::msghdr::default();
+ let mut vec = bindings::kvec {
+ iov_base: buf.as_mut_ptr().cast(),
+ iov_len: buf.len(),
+ };
+ // SAFETY: The type invariant guarantees that the socket is valid, and `vec` was
+ // initialised with the output buffer.
+ let r = unsafe {
+ bindings::kernel_recvmsg(
+ self.sock,
+ &mut msg,
+ &mut vec,
+ 1,
+ vec.iov_len,
+ if block { 0 } else { bindings::MSG_DONTWAIT } as _,
+ )
+ };
+ if r < 0 {
+ Err(Error::from_kernel_errno(r))
+ } else {
+ Ok(r as _)
+ }
+ }
+
+ /// Writes data to the connected socket.
+ ///
+ /// On success, returns the number of bytes written.
+ ///
+ /// If the send buffer of the socket is full, one of two behaviours will occur:
+ /// - If `block` is `false`, returns [`crate::error::code::EAGAIN`];
+ /// - If `block` is `true`, blocks until an error occurs or some data is written.
+ pub fn write(&self, buf: &[u8], block: bool) -> Result<usize> {
+ let mut msg = bindings::msghdr {
+ msg_flags: if block { 0 } else { bindings::MSG_DONTWAIT },
+ ..bindings::msghdr::default()
+ };
+ let mut vec = bindings::kvec {
+ iov_base: buf.as_ptr() as *mut u8 as _,
+ iov_len: buf.len(),
+ };
+ // SAFETY: The type invariant guarantees that the socket is valid, and `vec` was
+ // initialised with the input buffer.
+ let r = unsafe { bindings::kernel_sendmsg(self.sock, &mut msg, &mut vec, 1, vec.iov_len) };
+ if r < 0 {
+ Err(Error::from_kernel_errno(r))
+ } else {
+ Ok(r as _)
+ }
+ }
+}
+
+impl Drop for TcpStream {
+ fn drop(&mut self) {
+ // SAFETY: The type invariant guarantees that the socket is valid.
+ unsafe { bindings::sock_release(self.sock) };
+ }
+}
diff --git a/rust/kernel/net/filter.rs b/rust/kernel/net/filter.rs
new file mode 100644
index 000000000000..a50422d53848
--- /dev/null
+++ b/rust/kernel/net/filter.rs
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Networking filters.
+//!
+//! C header: [`include/linux/netfilter.h`](../../../../../include/linux/netfilter.h)
+
+use crate::{
+ bindings,
+ error::{code::*, to_result},
+ net,
+ types::PointerWrapper,
+ ARef, AlwaysRefCounted, Result, ScopeGuard,
+};
+use alloc::boxed::Box;
+use core::{
+ marker::{PhantomData, PhantomPinned},
+ pin::Pin,
+};
+
+/// A network filter.
+pub trait Filter {
+ /// The type of the context data stored on registration and made available to the
+ /// [`Filter::filter`] function.
+ type Data: PointerWrapper + Sync = ();
+
+ /// Filters the packet stored in the given buffer.
+ ///
+ /// It dictates to the netfilter core what the fate of the packet should be.
+ fn filter(
+ _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
+ _skb: &net::SkBuff,
+ ) -> Disposition;
+}
+
+/// Specifies the action to be taken by the netfilter core.
+pub enum Disposition {
+ /// Drop the packet.
+ Drop,
+
+ /// Accept the packet.
+ Accept,
+
+ /// The packet was stolen by the filter and must be treated as if it didn't exist.
+ Stolen,
+
+ /// Queue the packet to the given user-space queue.
+ Queue {
+ /// The identifier of the queue to which the packet should be added.
+ queue_id: u16,
+
+ /// Specifies the behaviour if a queue with the given identifier doesn't exist: if `true`,
+ /// the packet is accepted, otherwise it is rejected.
+ accept_if_queue_non_existent: bool,
+ },
+}
+
+/// The filter hook families.
+pub enum Family {
+ /// IPv4 and IPv6 packets.
+ INet(inet::Hook),
+
+ /// IPv4 packets.
+ Ipv4(ipv4::Hook, ipv4::PriorityBase),
+
+ /// All packets through a device.
+ ///
+ /// When this family is used, a device _must_ be specified.
+ NetDev(netdev::Hook),
+
+ /// IPv6 packets.
+ Ipv6(ipv6::Hook, ipv6::PriorityBase),
+
+ /// Address resolution protocol (ARP) packets.
+ Arp(arp::Hook),
+}
+
+/// A registration of a networking filter.
+///
+/// # Examples
+///
+/// The following is an example of a function that attaches an inbound filter (that always accepts
+/// all packets after printing their lengths) on the specified device (in the `init` ns).
+///
+/// ```
+/// use kernel::net::{self, filter as netfilter};
+///
+/// struct MyFilter;
+/// impl netfilter::Filter for MyFilter {
+/// fn filter(_data: (), skb: &net::SkBuff) -> netfilter::Disposition {
+/// pr_info!("Packet of length {}\n", skb.len());
+/// netfilter::Disposition::Accept
+/// }
+/// }
+///
+/// fn register(name: &CStr) -> Result<Pin<Box<netfilter::Registration<MyFilter>>>> {
+/// let ns = net::init_ns();
+/// let dev = ns.dev_get_by_name(name).ok_or(ENOENT)?;
+/// netfilter::Registration::new_pinned(
+/// netfilter::Family::NetDev(netfilter::netdev::Hook::Ingress),
+/// 0,
+/// ns.into(),
+/// Some(dev),
+/// (),
+/// )
+/// }
+/// ```
+#[derive(Default)]
+pub struct Registration<T: Filter> {
+ hook: bindings::nf_hook_ops,
+ // When `ns` is `Some(_)`, the hook is registered.
+ ns: Option<ARef<net::Namespace>>,
+ dev: Option<ARef<net::Device>>,
+ _p: PhantomData<T>,
+ _pinned: PhantomPinned,
+}
+
+// SAFETY: `Registration` does not expose any of its state across threads.
+unsafe impl<T: Filter> Sync for Registration<T> {}
+
+impl<T: Filter> Registration<T> {
+ /// Creates a new [`Registration`] but does not register it yet.
+ ///
+ /// It is allowed to move.
+ pub fn new() -> Self {
+ Self {
+ hook: bindings::nf_hook_ops::default(),
+ dev: None,
+ ns: None,
+ _p: PhantomData,
+ _pinned: PhantomPinned,
+ }
+ }
+
+ /// Creates a new filter registration and registers it.
+ ///
+ /// Returns a pinned heap-allocated representation of the registration.
+ pub fn new_pinned(
+ family: Family,
+ priority: i32,
+ ns: ARef<net::Namespace>,
+ dev: Option<ARef<net::Device>>,
+ data: T::Data,
+ ) -> Result<Pin<Box<Self>>> {
+ let mut filter = Pin::from(Box::try_new(Self::new())?);
+ filter.as_mut().register(family, priority, ns, dev, data)?;
+ Ok(filter)
+ }
+
+ /// Registers a network filter.
+ ///
+ /// It must be pinned because the C portion of the kernel stores a pointer to it while it is
+ /// registered.
+ ///
+ /// The priority is relative to the family's base priority. For example, if the base priority
+ /// is `100` and `priority` is `-1`, the actual priority will be `99`. If a family doesn't
+ /// explicitly allow a base to be specified, `0` is assumed.
+ pub fn register(
+ self: Pin<&mut Self>,
+ family: Family,
+ priority: i32,
+ ns: ARef<net::Namespace>,
+ dev: Option<ARef<net::Device>>,
+ data: T::Data,
+ ) -> Result {
+ // SAFETY: We must ensure that we never move out of `this`.
+ let this = unsafe { self.get_unchecked_mut() };
+ if this.ns.is_some() {
+ // Already registered.
+ return Err(EINVAL);
+ }
+
+ let data_pointer = data.into_pointer();
+
+ // SAFETY: `data_pointer` comes from the call to `data.into_pointer()` above.
+ let guard = ScopeGuard::new(|| unsafe {
+ T::Data::from_pointer(data_pointer);
+ });
+
+ let mut pri_base = 0i32;
+ match family {
+ Family::INet(hook) => {
+ this.hook.pf = bindings::NFPROTO_INET as _;
+ this.hook.hooknum = hook as _;
+ }
+ Family::Ipv4(hook, pbase) => {
+ this.hook.pf = bindings::NFPROTO_IPV4 as _;
+ this.hook.hooknum = hook as _;
+ pri_base = pbase as _;
+ }
+ Family::Ipv6(hook, pbase) => {
+ this.hook.pf = bindings::NFPROTO_IPV6 as _;
+ this.hook.hooknum = hook as _;
+ pri_base = pbase as _;
+ }
+ Family::NetDev(hook) => {
+ this.hook.pf = bindings::NFPROTO_NETDEV as _;
+ this.hook.hooknum = hook as _;
+ }
+ Family::Arp(hook) => {
+ this.hook.pf = bindings::NFPROTO_ARP as _;
+ this.hook.hooknum = hook as _;
+ }
+ }
+
+ this.hook.priority = pri_base.saturating_add(priority);
+ this.hook.priv_ = data_pointer as _;
+ this.hook.hook = Some(Self::hook_callback);
+ crate::static_assert!(bindings::nf_hook_ops_type_NF_HOOK_OP_UNDEFINED == 0);
+
+ if let Some(ref device) = dev {
+ this.hook.dev = device.0.get();
+ }
+
+ // SAFETY: `ns` has a valid reference to the namespace, and `this.hook` was just
+ // initialised above, so they're both valid.
+ to_result(unsafe { bindings::nf_register_net_hook(ns.0.get(), &this.hook) })?;
+
+ this.dev = dev;
+ this.ns = Some(ns);
+ guard.dismiss();
+ Ok(())
+ }
+
+ unsafe extern "C" fn hook_callback(
+ priv_: *mut core::ffi::c_void,
+ skb: *mut bindings::sk_buff,
+ _state: *const bindings::nf_hook_state,
+ ) -> core::ffi::c_uint {
+ // SAFETY: `priv_` was initialised on registration by a value returned from
+ // `T::Data::into_pointer`, and it remains valid until the hook is unregistered.
+ let data = unsafe { T::Data::borrow(priv_) };
+
+ // SAFETY: The C contract guarantees that `skb` remains valid for the duration of this
+ // function call.
+ match T::filter(data, unsafe { net::SkBuff::from_ptr(skb) }) {
+ Disposition::Drop => bindings::NF_DROP,
+ Disposition::Accept => bindings::NF_ACCEPT,
+ Disposition::Stolen => {
+ // SAFETY: This function takes over ownership of `skb` when it returns `NF_STOLEN`,
+ // so we decrement the refcount here to avoid a leak.
+ unsafe { net::SkBuff::dec_ref(core::ptr::NonNull::new(skb).unwrap().cast()) };
+ bindings::NF_STOLEN
+ }
+ Disposition::Queue {
+ queue_id,
+ accept_if_queue_non_existent,
+ } => {
+ // SAFETY: Just an FFI call, no additional safety requirements.
+ let verdict = unsafe { bindings::NF_QUEUE_NR(queue_id as _) };
+ if accept_if_queue_non_existent {
+ verdict | bindings::NF_VERDICT_FLAG_QUEUE_BYPASS
+ } else {
+ verdict
+ }
+ }
+ }
+ }
+}
+
+impl<T: Filter> Drop for Registration<T> {
+ fn drop(&mut self) {
+ if let Some(ref ns) = self.ns {
+ // SAFETY: `self.ns` is `Some(_)` only when a previous call to `nf_register_net_hook`
+ // succeeded. And the arguments are the same.
+ unsafe { bindings::nf_unregister_net_hook(ns.0.get(), &self.hook) };
+
+ // SAFETY: `self.hook.priv_` was initialised during registration to a value returned from
+ // `T::Data::into_pointer`, so it is ok to convert back here.
+ unsafe { T::Data::from_pointer(self.hook.priv_) };
+ }
+ }
+}
+
+/// Definitions used when defining hooks for the [`Family::NetDev`] family.
+pub mod netdev {
+ use crate::bindings;
+
+ /// Hooks allowed in the [`super::Family::NetDev`] family.
+ #[repr(u32)]
+ pub enum Hook {
+ /// All inbound packets through the given device.
+ Ingress = bindings::nf_dev_hooks_NF_NETDEV_INGRESS,
+
+ /// All outbound packets through the given device.
+ Egress = bindings::nf_dev_hooks_NF_NETDEV_EGRESS,
+ }
+}
+
+/// Definitions used when defining hooks for the [`Family::Ipv4`] family.
+pub mod ipv4 {
+ use crate::bindings;
+
+ /// Hooks allowed in [`super::Family::Ipv4`] family.
+ pub type Hook = super::inet::Hook;
+
+ /// The base priority for [`super::Family::Ipv4`] hooks.
+ ///
+ /// The actual priority is the base priority plus the priority specified when registering.
+ #[repr(i32)]
+ pub enum PriorityBase {
+ /// Same as the `NF_IP_PRI_FIRST` C constant.
+ First = bindings::nf_ip_hook_priorities_NF_IP_PRI_FIRST,
+
+ /// Same as the `NF_IP_PRI_RAW_BEFORE_DEFRAG` C constant.
+ RawBeforeDefrag = bindings::nf_ip_hook_priorities_NF_IP_PRI_RAW_BEFORE_DEFRAG,
+
+ /// Same as the `NF_IP_PRI_CONNTRACK_DEFRAG` C constant.
+ ConnTrackDefrag = bindings::nf_ip_hook_priorities_NF_IP_PRI_CONNTRACK_DEFRAG,
+
+ /// Same as the `NF_IP_PRI_RAW` C constant.
+ Raw = bindings::nf_ip_hook_priorities_NF_IP_PRI_RAW,
+
+ /// Same as the `NF_IP_PRI_SELINUX_FIRST` C constant.
+ SeLinuxFirst = bindings::nf_ip_hook_priorities_NF_IP_PRI_SELINUX_FIRST,
+
+ /// Same as the `NF_IP_PRI_CONNTRACK` C constant.
+ ConnTrack = bindings::nf_ip_hook_priorities_NF_IP_PRI_CONNTRACK,
+
+ /// Same as the `NF_IP_PRI_MANGLE` C constant.
+ Mangle = bindings::nf_ip_hook_priorities_NF_IP_PRI_MANGLE,
+
+ /// Same as the `NF_IP_PRI_NAT_DST` C constant.
+ NatDst = bindings::nf_ip_hook_priorities_NF_IP_PRI_NAT_DST,
+
+ /// Same as the `NF_IP_PRI_FILTER` C constant.
+ Filter = bindings::nf_ip_hook_priorities_NF_IP_PRI_FILTER,
+
+ /// Same as the `NF_IP_PRI_SECURITY` C constant.
+ Security = bindings::nf_ip_hook_priorities_NF_IP_PRI_SECURITY,
+
+ /// Same as the `NF_IP_PRI_NAT_SRC` C constant.
+ NatSrc = bindings::nf_ip_hook_priorities_NF_IP_PRI_NAT_SRC,
+
+ /// Same as the `NF_IP_PRI_SELINUX_LAST` C constant.
+ SeLinuxLast = bindings::nf_ip_hook_priorities_NF_IP_PRI_SELINUX_LAST,
+
+ /// Same as the `NF_IP_PRI_CONNTRACK_HELPER` C constant.
+ ConnTrackHelper = bindings::nf_ip_hook_priorities_NF_IP_PRI_CONNTRACK_HELPER,
+
+ /// Same as the `NF_IP_PRI_LAST` and `NF_IP_PRI_CONNTRACK_CONFIRM` C constants.
+ Last = bindings::nf_ip_hook_priorities_NF_IP_PRI_LAST,
+ }
+}
+
+/// Definitions used when defining hooks for the [`Family::Ipv6`] family.
+pub mod ipv6 {
+ use crate::bindings;
+
+ /// Hooks allowed in [`super::Family::Ipv6`] family.
+ pub type Hook = super::inet::Hook;
+
+ /// The base priority for [`super::Family::Ipv6`] hooks.
+ ///
+ /// The actual priority is the base priority plus the priority specified when registering.
+ #[repr(i32)]
+ pub enum PriorityBase {
+ /// Same as the `NF_IP6_PRI_FIRST` C constant.
+ First = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_FIRST,
+
+ /// Same as the `NF_IP6_PRI_RAW_BEFORE_DEFRAG` C constant.
+ RawBeforeDefrag = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_RAW_BEFORE_DEFRAG,
+
+ /// Same as the `NF_IP6_PRI_CONNTRACK_DEFRAG` C constant.
+ ConnTrackDefrag = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_CONNTRACK_DEFRAG,
+
+ /// Same as the `NF_IP6_PRI_RAW` C constant.
+ Raw = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_RAW,
+
+ /// Same as the `NF_IP6_PRI_SELINUX_FIRST` C constant.
+ SeLinuxFirst = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_SELINUX_FIRST,
+
+ /// Same as the `NF_IP6_PRI_CONNTRACK` C constant.
+ ConnTrack = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_CONNTRACK,
+
+ /// Same as the `NF_IP6_PRI_MANGLE` C constant.
+ Mangle = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_MANGLE,
+
+ /// Same as the `NF_IP6_PRI_NAT_DST` C constant.
+ NatDst = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_NAT_DST,
+
+ /// Same as the `NF_IP6_PRI_FILTER` C constant.
+ Filter = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_FILTER,
+
+ /// Same as the `NF_IP6_PRI_SECURITY` C constant.
+ Security = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_SECURITY,
+
+ /// Same as the `NF_IP6_PRI_NAT_SRC` C constant.
+ NatSrc = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_NAT_SRC,
+
+ /// Same as the `NF_IP6_PRI_SELINUX_LAST` C constant.
+ SeLinuxLast = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_SELINUX_LAST,
+
+ /// Same as the `NF_IP6_PRI_CONNTRACK_HELPER` C constant.
+ ConnTrackHelper = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_CONNTRACK_HELPER,
+
+ /// Same as the `NF_IP6_PRI_LAST` C constant.
+ Last = bindings::nf_ip6_hook_priorities_NF_IP6_PRI_LAST,
+ }
+}
+
+/// Definitions used when defining hooks for the [`Family::Arp`] family.
+pub mod arp {
+ use crate::bindings;
+
+ /// Hooks allowed in the [`super::Family::Arp`] family.
+ #[repr(u32)]
+ pub enum Hook {
+ /// Inbound ARP packets.
+ In = bindings::NF_ARP_IN,
+
+ /// Outbound ARP packets.
+ Out = bindings::NF_ARP_OUT,
+
+ /// Forwarded ARP packets.
+ Forward = bindings::NF_ARP_FORWARD,
+ }
+}
+
+/// Definitions used when defining hooks for the [`Family::INet`] family.
+pub mod inet {
+ use crate::bindings;
+
+ /// Hooks allowed in the [`super::Family::INet`], [`super::Family::Ipv4`], and
+ /// [`super::Family::Ipv6`] families.
+ #[repr(u32)]
+ pub enum Hook {
+ /// Inbound packets before routing decisions are made (i.e., before it's determined if the
+ /// packet is to be delivered locally or forwarded to another host).
+ PreRouting = bindings::nf_inet_hooks_NF_INET_PRE_ROUTING as _,
+
+ /// Inbound packets that are meant to be delivered locally.
+ LocalIn = bindings::nf_inet_hooks_NF_INET_LOCAL_IN as _,
+
+ /// Inbound packets that are meant to be forwarded to another host.
+ Forward = bindings::nf_inet_hooks_NF_INET_FORWARD as _,
+
+ /// Outbound packet created by the local networking stack.
+ LocalOut = bindings::nf_inet_hooks_NF_INET_LOCAL_OUT as _,
+
+ /// All outbound packets (i.e., generated locally or being forwarded to another host).
+ PostRouting = bindings::nf_inet_hooks_NF_INET_POST_ROUTING as _,
+
+ /// Equivalent to [`super::netdev::Hook::Ingress`], so a device must be specified. Packets
+ /// of all types (not just ipv4/ipv6) will be delivered to the filter.
+ Ingress = bindings::nf_inet_hooks_NF_INET_INGRESS as _,
+ }
+}
diff --git a/rust/kernel/of.rs b/rust/kernel/of.rs
new file mode 100644
index 000000000000..cdcd83244337
--- /dev/null
+++ b/rust/kernel/of.rs
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Devicetree and Open Firmware abstractions.
+//!
+//! C header: [`include/linux/of_*.h`](../../../../include/linux/of_*.h)
+
+use crate::{bindings, driver, str::BStr};
+
+/// An open firmware device id.
+#[derive(Clone, Copy)]
+pub enum DeviceId {
+ /// An open firmware device id where only a compatible string is specified.
+ Compatible(&'static BStr),
+}
+
+/// Defines a const open firmware device id table that also carries per-entry data/context/info.
+///
+/// The name of the const is `OF_DEVICE_ID_TABLE`, which is what buses are expected to name their
+/// open firmware tables.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::define_of_id_table;
+/// use kernel::of;
+///
+/// define_of_id_table! {u32, [
+/// (of::DeviceId::Compatible(b"test-device1,test-device2"), Some(0xff)),
+/// (of::DeviceId::Compatible(b"test-device3"), None),
+/// ]};
+/// ```
+#[macro_export]
+macro_rules! define_of_id_table {
+ ($data_type:ty, $($t:tt)*) => {
+ $crate::define_id_table!(OF_DEVICE_ID_TABLE, $crate::of::DeviceId, $data_type, $($t)*);
+ };
+}
+
+// SAFETY: `ZERO` is all zeroed-out and `to_rawid` stores `offset` in `of_device_id::data`.
+unsafe impl const driver::RawDeviceId for DeviceId {
+ type RawType = bindings::of_device_id;
+ const ZERO: Self::RawType = bindings::of_device_id {
+ name: [0; 32],
+ type_: [0; 32],
+ compatible: [0; 128],
+ data: core::ptr::null(),
+ };
+
+ fn to_rawid(&self, offset: isize) -> Self::RawType {
+ let DeviceId::Compatible(compatible) = self;
+ let mut id = Self::ZERO;
+ let mut i = 0;
+ while i < compatible.len() {
+ // If `compatible` does not fit in `id.compatible`, an "index out of bounds" build-time
+ // error will be triggered.
+ id.compatible[i] = compatible[i] as _;
+ i += 1;
+ }
+ id.compatible[i] = b'\0' as _;
+ id.data = offset as _;
+ id
+ }
+}
diff --git a/rust/kernel/pages.rs b/rust/kernel/pages.rs
new file mode 100644
index 000000000000..f2bb26810cd7
--- /dev/null
+++ b/rust/kernel/pages.rs
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel page allocation and management.
+//!
+//! TODO: This module is a work in progress.
+
+use crate::{
+ bindings, error::code::*, io_buffer::IoBufferReader, user_ptr::UserSlicePtrReader, Result,
+ PAGE_SIZE,
+};
+use core::{marker::PhantomData, ptr};
+
+/// A set of physical pages.
+///
+/// `Pages` holds a reference to a set of pages of order `ORDER`. Having the order as a generic
+/// const allows the struct to have the same size as a pointer.
+///
+/// # Invariants
+///
+/// The pointer `Pages::pages` is valid and points to 2^ORDER pages.
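+///
+/// # Examples
+///
+/// A minimal sketch of allocating a single page (order 0) and round-tripping a few bytes through
+/// it with the unsafe [`Pages::write`] and [`Pages::read`] helpers; the `Result`-returning
+/// wrapper function exists only so that `?` can be used and is illustrative.
+///
+/// ```ignore
+/// use kernel::pages::Pages;
+///
+/// fn page_roundtrip() -> Result {
+///     let pages = Pages::<0>::new()?;
+///     let src = [1u8, 2, 3, 4];
+///     let mut dst = [0u8; 4];
+///     // SAFETY: `src` and `dst` are valid for the given lengths and fit within the first page.
+///     unsafe {
+///         pages.write(src.as_ptr(), 0, src.len())?;
+///         pages.read(dst.as_mut_ptr(), 0, dst.len())?;
+///     }
+///     assert_eq!(src, dst);
+///     Ok(())
+/// }
+/// ```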
+pub struct Pages<const ORDER: u32> {
+ pub(crate) pages: *mut bindings::page,
+}
+
+impl<const ORDER: u32> Pages<ORDER> {
+ /// Allocates a new set of contiguous pages.
+ pub fn new() -> Result<Self> {
+ // TODO: Consider whether we want to allow callers to specify flags.
+ // SAFETY: This only allocates pages. We check that it succeeds in the next statement.
+ let pages = unsafe {
+ bindings::alloc_pages(
+ bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::__GFP_HIGHMEM,
+ ORDER,
+ )
+ };
+ if pages.is_null() {
+ return Err(ENOMEM);
+ }
+ // INVARIANTS: We checked that the allocation above succeeded.
+ Ok(Self { pages })
+ }
+
+ /// Copies data from the given [`UserSlicePtrReader`] into the pages.
+ pub fn copy_into_page(
+ &self,
+ reader: &mut UserSlicePtrReader,
+ offset: usize,
+ len: usize,
+ ) -> Result {
+ // TODO: For now this only works on the first page.
+ let end = offset.checked_add(len).ok_or(EINVAL)?;
+ if end > PAGE_SIZE {
+ return Err(EINVAL);
+ }
+
+ let mapping = self.kmap(0).ok_or(EINVAL)?;
+
+ // SAFETY: We ensured that the buffer was valid with the check above.
+ unsafe { reader.read_raw((mapping.ptr as usize + offset) as _, len) }?;
+ Ok(())
+ }
+
+ /// Maps the pages and reads from them into the given buffer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the destination buffer is valid for the given length.
+ /// Additionally, if the raw buffer is intended to be recast, they must ensure that the data
+ /// can be safely cast; [`crate::io_buffer::ReadableFromBytes`] has more details about it.
+ pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
+ // TODO: For now this only works on the first page.
+ let end = offset.checked_add(len).ok_or(EINVAL)?;
+ if end > PAGE_SIZE {
+ return Err(EINVAL);
+ }
+
+ let mapping = self.kmap(0).ok_or(EINVAL)?;
+ unsafe { ptr::copy((mapping.ptr as *mut u8).add(offset), dest, len) };
+ Ok(())
+ }
+
+ /// Maps the pages and writes into them from the given buffer.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the buffer is valid for the given length. Additionally, if the
+ /// page is (or will be) mapped by userspace, they must ensure that no kernel data is leaked
+ /// through padding if it was cast from another type; [`crate::io_buffer::WritableToBytes`] has
+ /// more details about it.
+ pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> Result {
+ // TODO: For now this only works on the first page.
+ let end = offset.checked_add(len).ok_or(EINVAL)?;
+ if end > PAGE_SIZE {
+ return Err(EINVAL);
+ }
+
+ let mapping = self.kmap(0).ok_or(EINVAL)?;
+ unsafe { ptr::copy(src, (mapping.ptr as *mut u8).add(offset), len) };
+ Ok(())
+ }
+
+ /// Maps the page at index `index`.
+ fn kmap(&self, index: usize) -> Option<PageMapping<'_>> {
+ if index >= 1usize << ORDER {
+ return None;
+ }
+
+ // SAFETY: We checked above that `index` is within range.
+ let page = unsafe { self.pages.add(index) };
+
+ // SAFETY: `page` is valid based on the checks above.
+ let ptr = unsafe { bindings::kmap(page) };
+ if ptr.is_null() {
+ return None;
+ }
+
+ Some(PageMapping {
+ page,
+ ptr,
+ _phantom: PhantomData,
+ })
+ }
+}
+
+impl<const ORDER: u32> Drop for Pages<ORDER> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, we know the pages are allocated with the given order.
+ unsafe { bindings::__free_pages(self.pages, ORDER) };
+ }
+}
+
+struct PageMapping<'a> {
+ page: *mut bindings::page,
+ ptr: *mut core::ffi::c_void,
+ _phantom: PhantomData<&'a i32>,
+}
+
+impl Drop for PageMapping<'_> {
+ fn drop(&mut self) {
+ // SAFETY: An instance of `PageMapping` is created only when `kmap` succeeded for the given
+ // page, so it is safe to unmap it here.
+ unsafe { bindings::kunmap(self.page) };
+ }
+}
diff --git a/rust/kernel/platform.rs b/rust/kernel/platform.rs
new file mode 100644
index 000000000000..d8cc0e0120aa
--- /dev/null
+++ b/rust/kernel/platform.rs
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Platform devices and drivers.
+//!
+//! Also called `platdev`, `pdev`.
+//!
+//! C header: [`include/linux/platform_device.h`](../../../../include/linux/platform_device.h)
+
+use crate::{
+ bindings,
+ device::{self, RawDevice},
+ driver,
+ error::{from_kernel_result, Result},
+ of,
+ str::CStr,
+ to_result,
+ types::PointerWrapper,
+ ThisModule,
+};
+
+/// A registration of a platform driver.
+pub type Registration<T> = driver::Registration<Adapter<T>>;
+
+/// An adapter for the registration of platform drivers.
+pub struct Adapter<T: Driver>(T);
+
+impl<T: Driver> driver::DriverOps for Adapter<T> {
+ type RegType = bindings::platform_driver;
+
+ unsafe fn register(
+ reg: *mut bindings::platform_driver,
+ name: &'static CStr,
+ module: &'static ThisModule,
+ ) -> Result {
+ // SAFETY: By the safety requirements of this function (defined in the trait definition),
+ // `reg` is non-null and valid.
+ let pdrv = unsafe { &mut *reg };
+
+ pdrv.driver.name = name.as_char_ptr();
+ pdrv.probe = Some(Self::probe_callback);
+ pdrv.remove = Some(Self::remove_callback);
+ if let Some(t) = T::OF_DEVICE_ID_TABLE {
+ pdrv.driver.of_match_table = t.as_ref();
+ }
+ // SAFETY:
+ // - `pdrv` lives at least until the call to `platform_driver_unregister()` returns.
+ // - `name` pointer has static lifetime.
+ // - `module.0` lives at least as long as the module.
+ // - `probe()` and `remove()` are static functions.
+ // - `of_match_table` is either a raw pointer with static lifetime,
+ // as guaranteed by the [`driver::IdTable`] type, or null.
+ to_result(unsafe { bindings::__platform_driver_register(reg, module.0) })
+ }
+
+ unsafe fn unregister(reg: *mut bindings::platform_driver) {
+ // SAFETY: By the safety requirements of this function (defined in the trait definition),
+ // `reg` was passed (and updated) by a previous successful call to
+ // `platform_driver_register`.
+ unsafe { bindings::platform_driver_unregister(reg) };
+ }
+}
+
+impl<T: Driver> Adapter<T> {
+ fn get_id_info(dev: &Device) -> Option<&'static T::IdInfo> {
+ let table = T::OF_DEVICE_ID_TABLE?;
+
+ // SAFETY: `table` has static lifetime, so it is valid for read. `dev` is guaranteed to be
+ // valid while it's alive, so is the raw device returned by it.
+ let id = unsafe { bindings::of_match_device(table.as_ref(), dev.raw_device()) };
+ if id.is_null() {
+ return None;
+ }
+
+ // SAFETY: `id` is a pointer within the static table, so it's always valid.
+ let offset = unsafe { (*id).data };
+ if offset.is_null() {
+ return None;
+ }
+
+ // SAFETY: The offset comes from a previous call to `offset_from` in `IdArray::new`, which
+ // guarantees that the resulting pointer is within the table.
+ let ptr = unsafe {
+ id.cast::<u8>()
+ .offset(offset as _)
+ .cast::<Option<T::IdInfo>>()
+ };
+
+ // SAFETY: The id table has a static lifetime, so `ptr` is guaranteed to be valid for read.
+ unsafe { (&*ptr).as_ref() }
+ }
+
+ extern "C" fn probe_callback(pdev: *mut bindings::platform_device) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `pdev` is valid by the contract with the C code. `dev` is alive only for the
+ // duration of this call, so it is guaranteed to remain alive for the lifetime of
+ // `pdev`.
+ let mut dev = unsafe { Device::from_ptr(pdev) };
+ let info = Self::get_id_info(&dev);
+ let data = T::probe(&mut dev, info)?;
+ // SAFETY: `pdev` is guaranteed to be a valid, non-null pointer.
+ unsafe { bindings::platform_set_drvdata(pdev, data.into_pointer() as _) };
+ Ok(0)
+ }
+ }
+
+ extern "C" fn remove_callback(pdev: *mut bindings::platform_device) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `pdev` is guaranteed to be a valid, non-null pointer.
+ let ptr = unsafe { bindings::platform_get_drvdata(pdev) };
+ // SAFETY:
+ // - we allocated this pointer using `T::Data::into_pointer`,
+ // so it is safe to turn back into a `T::Data`.
+ // - the allocation happened in `probe` and no one has freed the memory since;
+ // `remove` is the canonical kernel location to free driver data, so it is
+ // OK to convert the pointer back to a Rust structure here.
+ let data = unsafe { T::Data::from_pointer(ptr) };
+ let ret = T::remove(&data);
+ <T::Data as driver::DeviceRemoval>::device_remove(&data);
+ ret?;
+ Ok(0)
+ }
+ }
+}
+
+/// A platform driver.
+pub trait Driver {
+ /// Data stored on device by driver.
+ ///
+ /// Corresponds to the data set or retrieved via the kernel's
+ /// `platform_{set,get}_drvdata()` functions.
+ ///
+ /// Require that `Data` implements `PointerWrapper`. We guarantee to
+ /// never move the underlying wrapped data structure. This allows driver
+ /// writers to rely on the address of the wrapped data remaining stable.
+ type Data: PointerWrapper + Send + Sync + driver::DeviceRemoval = ();
+
+ /// The type holding information about each device id supported by the driver.
+ type IdInfo: 'static = ();
+
+ /// The table of device ids supported by the driver.
+ const OF_DEVICE_ID_TABLE: Option<driver::IdTable<'static, of::DeviceId, Self::IdInfo>> = None;
+
+ /// Platform driver probe.
+ ///
+ /// Called when a new platform device is added or discovered.
+ /// Implementers should attempt to initialize the device here.
+ fn probe(dev: &mut Device, id_info: Option<&Self::IdInfo>) -> Result<Self::Data>;
+
+ /// Platform driver remove.
+ ///
+ /// Called when a platform device is removed.
+ /// Implementers should prepare the device for complete removal here.
+ fn remove(_data: &Self::Data) -> Result {
+ Ok(())
+ }
+}
+
+/// A platform device.
+///
+/// # Invariants
+///
+/// The field `ptr` is non-null and valid for the lifetime of the object.
+pub struct Device {
+ ptr: *mut bindings::platform_device,
+}
+
+impl Device {
+ /// Creates a new device from the given pointer.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be non-null and valid. It must remain valid for the lifetime of the returned
+ /// instance.
+ unsafe fn from_ptr(ptr: *mut bindings::platform_device) -> Self {
+ // INVARIANT: The safety requirements of the function ensure the lifetime invariant.
+ Self { ptr }
+ }
+
+ /// Returns the id of the platform device.
+ pub fn id(&self) -> i32 {
+ // SAFETY: By the type invariants, we know that `self.ptr` is non-null and valid.
+ unsafe { (*self.ptr).id }
+ }
+}
+
+// SAFETY: The device returned by `raw_device` is the raw platform device.
+unsafe impl device::RawDevice for Device {
+ fn raw_device(&self) -> *mut bindings::device {
+ // SAFETY: By the type invariants, we know that `self.ptr` is non-null and valid.
+ unsafe { &mut (*self.ptr).dev }
+ }
+}
+
+/// Declares a kernel module that exposes a single platform driver.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::{platform, define_of_id_table, module_platform_driver};
+/// #
+/// struct MyDriver;
+/// impl platform::Driver for MyDriver {
+/// // [...]
+/// # fn probe(_dev: &mut platform::Device, _id_info: Option<&Self::IdInfo>) -> Result {
+/// # Ok(())
+/// # }
+/// # define_of_id_table! {(), [
+/// # (of::DeviceId::Compatible(b"brcm,bcm2835-rng"), None),
+/// # ]}
+/// }
+///
+/// module_platform_driver! {
+/// type: MyDriver,
+/// name: b"module_name",
+/// author: b"Author name",
+/// license: b"GPL",
+/// }
+/// ```
+#[macro_export]
+macro_rules! module_platform_driver {
+ ($($f:tt)*) => {
+ $crate::module_driver!(<T>, $crate::platform::Adapter<T>, { $($f)* });
+ };
+}
diff --git a/rust/kernel/power.rs b/rust/kernel/power.rs
new file mode 100644
index 000000000000..ef788557b269
--- /dev/null
+++ b/rust/kernel/power.rs
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Power management interfaces.
+//!
+//! C header: [`include/linux/pm.h`](../../../../include/linux/pm.h)
+
+#![allow(dead_code)]
+
+use crate::{bindings, error::from_kernel_result, types::PointerWrapper, Result};
+use core::marker::PhantomData;
+
+/// Corresponds to the kernel's `struct dev_pm_ops`.
+///
+/// It is meant to be implemented by drivers that support power-management operations.
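+///
+/// # Examples
+///
+/// A minimal sketch of a driver that only overrides `suspend`. It assumes that `Box<DriverData>`
+/// satisfies the `PointerWrapper + Sync + Send` bounds with a shared reference as its borrowed
+/// form; the type names are illustrative.
+///
+/// ```ignore
+/// use kernel::power;
+/// use kernel::prelude::*;
+///
+/// struct DriverData;
+/// struct PmOps;
+///
+/// impl power::Operations for PmOps {
+///     type Data = Box<DriverData>;
+///
+///     fn suspend(_data: &DriverData) -> Result {
+///         pr_info!("suspending\n");
+///         Ok(())
+///     }
+/// }
+/// ```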
+pub trait Operations {
+ /// The type of the context data stored by the driver on each device.
+ type Data: PointerWrapper + Sync + Send;
+
+ /// Called before the system goes into a sleep state.
+ fn suspend(_data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Result {
+ Ok(())
+ }
+
+ /// Called after the system comes back from a sleep state.
+ fn resume(_data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Result {
+ Ok(())
+ }
+
+ /// Called before creating a hibernation image.
+ fn freeze(_data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Result {
+ Ok(())
+ }
+
+ /// Called after the system is restored from a hibernation image.
+ fn restore(_data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Result {
+ Ok(())
+ }
+}
+
+macro_rules! pm_callback {
+ ($callback:ident, $method:ident) => {
+ unsafe extern "C" fn $callback<T: Operations>(
+ dev: *mut bindings::device,
+ ) -> core::ffi::c_int {
+ from_kernel_result! {
+ // SAFETY: `dev` is valid as it was passed in by the C portion.
+ let ptr = unsafe { bindings::dev_get_drvdata(dev) };
+ // SAFETY: By the safety requirements of `OpsTable::build`, we know that `ptr` came
+ // from a previous call to `T::Data::into_pointer`.
+ let data = unsafe { T::Data::borrow(ptr) };
+ T::$method(data)?;
+ Ok(0)
+ }
+ }
+ };
+}
+
+pm_callback!(suspend_callback, suspend);
+pm_callback!(resume_callback, resume);
+pm_callback!(freeze_callback, freeze);
+pm_callback!(restore_callback, restore);
+
+pub(crate) struct OpsTable<T: Operations>(PhantomData<*const T>);
+
+impl<T: Operations> OpsTable<T> {
+ const VTABLE: bindings::dev_pm_ops = bindings::dev_pm_ops {
+ prepare: None,
+ complete: None,
+ suspend: Some(suspend_callback::<T>),
+ resume: Some(resume_callback::<T>),
+ freeze: Some(freeze_callback::<T>),
+ thaw: None,
+ poweroff: None,
+ restore: Some(restore_callback::<T>),
+ suspend_late: None,
+ resume_early: None,
+ freeze_late: None,
+ thaw_early: None,
+ poweroff_late: None,
+ restore_early: None,
+ suspend_noirq: None,
+ resume_noirq: None,
+ freeze_noirq: None,
+ thaw_noirq: None,
+ poweroff_noirq: None,
+ restore_noirq: None,
+ runtime_suspend: None,
+ runtime_resume: None,
+ runtime_idle: None,
+ };
+
+ /// Builds an instance of `struct dev_pm_ops`.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `dev_get_drvdata` will result in a value returned by
+ /// [`T::Data::into_pointer`].
+ pub(crate) const unsafe fn build() -> &'static bindings::dev_pm_ops {
+ &Self::VTABLE
+ }
+}
+
+/// Implements the [`Operations`] trait as no-ops.
+///
+/// This is useful when one doesn't want to provide an implementation for any power-management
+/// related operation.
+pub struct NoOperations<T: PointerWrapper>(PhantomData<T>);
+
+impl<T: PointerWrapper + Send + Sync> Operations for NoOperations<T> {
+ type Data = T;
+}
+
+// SAFETY: `NoOperations` provides no functionality, so it is safe to send a reference to it to
+// different threads.
+unsafe impl<T: PointerWrapper> Sync for NoOperations<T> {}
+
+// SAFETY: `NoOperations` provides no functionality, so it is safe to send it to different threads.
+unsafe impl<T: PointerWrapper> Send for NoOperations<T> {}
diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs
new file mode 100644
index 000000000000..26f8af9e16ab
--- /dev/null
+++ b/rust/kernel/prelude.rs
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! The `kernel` prelude.
+//!
+//! These are the most common items used by Rust code in the kernel,
+//! intended to be imported by all Rust code, for convenience.
+//!
+//! # Examples
+//!
+//! ```
+//! use kernel::prelude::*;
+//! ```
+
+pub use core::pin::Pin;
+
+pub use alloc::{boxed::Box, string::String, vec::Vec};
+
+pub use macros::{module, vtable};
+
+pub use super::build_assert;
+
+pub use super::{
+ dbg, dev_alert, dev_crit, dev_dbg, dev_emerg, dev_err, dev_info, dev_notice, dev_warn, fmt,
+ pr_alert, pr_crit, pr_debug, pr_emerg, pr_err, pr_info, pr_notice, pr_warn,
+};
+
+pub use super::{module_fs, module_misc_device};
+
+#[cfg(CONFIG_ARM_AMBA)]
+pub use super::module_amba_driver;
+
+pub use super::static_assert;
+
+pub use super::{error::code::*, Error, Result};
+
+pub use super::{str::CStr, ARef, ThisModule};
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
new file mode 100644
index 000000000000..92541efc7e22
--- /dev/null
+++ b/rust/kernel/print.rs
@@ -0,0 +1,406 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Printing facilities.
+//!
+//! C header: [`include/linux/printk.h`](../../../../include/linux/printk.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/core-api/printk-basics.html>
+
+use core::{
+ ffi::{c_char, c_void},
+ fmt,
+};
+
+use crate::str::RawFormatter;
+
+#[cfg(CONFIG_PRINTK)]
+use crate::bindings;
+
+// Called from `vsprintf` with format specifier `%pA`.
+#[no_mangle]
+unsafe fn rust_fmt_argument(buf: *mut c_char, end: *mut c_char, ptr: *const c_void) -> *mut c_char {
+ use fmt::Write;
+ // SAFETY: The C contract guarantees that `buf` is valid if it's less than `end`.
+ let mut w = unsafe { RawFormatter::from_ptrs(buf.cast(), end.cast()) };
+ let _ = w.write_fmt(unsafe { *(ptr as *const fmt::Arguments<'_>) });
+ w.pos().cast()
+}
+
+/// Format strings.
+///
+/// Public but hidden since it should only be used from public macros.
+#[doc(hidden)]
+pub mod format_strings {
+ use crate::bindings;
+
+ /// The length we copy from the `KERN_*` kernel prefixes.
+ const LENGTH_PREFIX: usize = 2;
+
+ /// The length of the fixed format strings.
+ pub const LENGTH: usize = 10;
+
+ /// Generates a fixed format string for the kernel's [`_printk`].
+ ///
+ /// The format string is always the same for a given level, i.e. for a
+ /// given `prefix`, which is one of the kernel's `KERN_*` constants.
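+ ///
+ /// For instance, assuming `KERN_INFO` is the two bytes `"\x01" "6"` (plus its NUL), the
+ /// generated string for the `INFO` level would be the 10 bytes `b"\x016%s: %pA\0"`; this
+ /// worked example is illustrative only.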
+ ///
+ /// [`_printk`]: ../../../../include/linux/printk.h
+ const fn generate(is_cont: bool, prefix: &[u8; 3]) -> [u8; LENGTH] {
+ // Ensure the `KERN_*` macros are what we expect.
+ assert!(prefix[0] == b'\x01');
+ if is_cont {
+ assert!(prefix[1] == b'c');
+ } else {
+ assert!(prefix[1] >= b'0' && prefix[1] <= b'7');
+ }
+ assert!(prefix[2] == b'\x00');
+
+ let suffix: &[u8; LENGTH - LENGTH_PREFIX] = if is_cont {
+ b"%pA\0\0\0\0\0"
+ } else {
+ b"%s: %pA\0"
+ };
+
+ [
+ prefix[0], prefix[1], suffix[0], suffix[1], suffix[2], suffix[3], suffix[4], suffix[5],
+ suffix[6], suffix[7],
+ ]
+ }
+
+ // Generate the format strings at compile-time.
+ //
+ // This avoids the compiler generating the contents on the fly in the stack.
+ //
+ // Furthermore, `static` instead of `const` is used to share the strings
+ // for all the kernel.
+ pub static EMERG: [u8; LENGTH] = generate(false, bindings::KERN_EMERG);
+ pub static ALERT: [u8; LENGTH] = generate(false, bindings::KERN_ALERT);
+ pub static CRIT: [u8; LENGTH] = generate(false, bindings::KERN_CRIT);
+ pub static ERR: [u8; LENGTH] = generate(false, bindings::KERN_ERR);
+ pub static WARNING: [u8; LENGTH] = generate(false, bindings::KERN_WARNING);
+ pub static NOTICE: [u8; LENGTH] = generate(false, bindings::KERN_NOTICE);
+ pub static INFO: [u8; LENGTH] = generate(false, bindings::KERN_INFO);
+ pub static DEBUG: [u8; LENGTH] = generate(false, bindings::KERN_DEBUG);
+ pub static CONT: [u8; LENGTH] = generate(true, bindings::KERN_CONT);
+}
+
+/// Prints a message via the kernel's [`_printk`].
+///
+/// Public but hidden since it should only be used from public macros.
+///
+/// # Safety
+///
+/// The format string must be one of the ones in [`format_strings`], and
+/// the module name must be null-terminated.
+///
+/// [`_printk`]: ../../../../include/linux/printk.h
+#[doc(hidden)]
+#[cfg_attr(not(CONFIG_PRINTK), allow(unused_variables))]
+pub unsafe fn call_printk(
+ format_string: &[u8; format_strings::LENGTH],
+ module_name: &[u8],
+ args: fmt::Arguments<'_>,
+) {
+ // `_printk` does not seem to fail in any path.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_printk(
+ format_string.as_ptr() as _,
+ module_name.as_ptr(),
+ &args as *const _ as *const c_void,
+ );
+ }
+}
+
+/// Prints a message via the kernel's [`_printk`] for the `CONT` level.
+///
+/// Public but hidden since it should only be used from public macros.
+///
+/// [`_printk`]: ../../../../include/linux/printk.h
+#[doc(hidden)]
+#[cfg_attr(not(CONFIG_PRINTK), allow(unused_variables))]
+pub fn call_printk_cont(args: fmt::Arguments<'_>) {
+ // `_printk` does not seem to fail in any path.
+ //
+ // SAFETY: The format string is fixed.
+ #[cfg(CONFIG_PRINTK)]
+ unsafe {
+ bindings::_printk(
+ format_strings::CONT.as_ptr() as _,
+ &args as *const _ as *const c_void,
+ );
+ }
+}
+
+/// Performs formatting and forwards the string to [`call_printk`].
+///
+/// Public but hidden since it should only be used from public macros.
+#[doc(hidden)]
+#[cfg(not(testlib))]
+#[macro_export]
+#[allow(clippy::crate_in_macro_def)]
+macro_rules! print_macro (
+ // The non-continuation cases (most of them, e.g. `INFO`).
+ ($format_string:path, false, $($arg:tt)+) => (
+ // SAFETY: This hidden macro should only be called by the documented
+ // printing macros which ensure the format string is one of the fixed
+ // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
+ // by the `module!` proc macro or fixed values defined in a kernel
+ // crate.
+ unsafe {
+ $crate::print::call_printk(
+ &$format_string,
+ crate::__LOG_PREFIX,
+ format_args!($($arg)+),
+ );
+ }
+ );
+
+ // The `CONT` case.
+ ($format_string:path, true, $($arg:tt)+) => (
+ $crate::print::call_printk_cont(
+ format_args!($($arg)+),
+ );
+ );
+);
+
+/// Stub for doctests
+#[cfg(testlib)]
+#[macro_export]
+macro_rules! print_macro (
+ ($format_string:path, $e:expr, $($arg:tt)+) => (
+ ()
+ );
+);
+
+// We could use a macro to generate these macros. However, doing so ends
+// up being a bit ugly: it requires the dollar token trick to escape `$` as
+// well as playing with the `doc` attribute. Furthermore, they cannot be easily
+// imported in the prelude due to [1]. So, for the moment, we just write them
+// manually, like in the C side; while keeping most of the logic in another
+// macro, i.e. [`print_macro`].
+//
+// [1]: https://github.com/rust-lang/rust/issues/52234
+
+/// Prints an emergency-level message (level 0).
+///
+/// Use this level if the system is unusable.
+///
+/// Equivalent to the kernel's [`pr_emerg`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_emerg`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_emerg
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// pr_emerg!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_emerg (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::EMERG, false, $($arg)*)
+ )
+);
+
+/// Prints an alert-level message (level 1).
+///
+/// Use this level if action must be taken immediately.
+///
+/// Equivalent to the kernel's [`pr_alert`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_alert`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_alert
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// pr_alert!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_alert (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::ALERT, false, $($arg)*)
+ )
+);
+
+/// Prints a critical-level message (level 2).
+///
+/// Use this level for critical conditions.
+///
+/// Equivalent to the kernel's [`pr_crit`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_crit`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_crit
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// pr_crit!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_crit (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::CRIT, false, $($arg)*)
+ )
+);
+
+/// Prints an error-level message (level 3).
+///
+/// Use this level for error conditions.
+///
+/// Equivalent to the kernel's [`pr_err`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_err`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_err
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// pr_err!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_err (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::ERR, false, $($arg)*)
+ )
+);
+
+/// Prints a warning-level message (level 4).
+///
+/// Use this level for warning conditions.
+///
+/// Equivalent to the kernel's [`pr_warn`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_warn`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_warn
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// pr_warn!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_warn (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::WARNING, false, $($arg)*)
+ )
+);
+
+/// Prints a notice-level message (level 5).
+///
+/// Use this level for normal but significant conditions.
+///
+/// Equivalent to the kernel's [`pr_notice`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_notice`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_notice
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// pr_notice!("hello {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_notice (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::NOTICE, false, $($arg)*)
+ )
+);
+
+/// Prints an info-level message (level 6).
+///
+/// Use this level for informational messages.
+///
+/// Equivalent to the kernel's [`pr_info`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_info`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_info
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// pr_info!("hello {}\n", "there");
+/// ```
+#[macro_export]
+#[doc(alias = "print")]
+macro_rules! pr_info (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::INFO, false, $($arg)*)
+ )
+);
+
+/// Prints a debug-level message (level 7).
+///
+/// Use this level for debug messages.
+///
+/// Equivalent to the kernel's [`pr_debug`] macro, except that it doesn't support dynamic debug
+/// yet.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_debug`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_debug
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// pr_debug!("hello {}\n", "there");
+/// ```
+#[macro_export]
+#[doc(alias = "print")]
+macro_rules! pr_debug (
+ ($($arg:tt)*) => (
+ if cfg!(debug_assertions) {
+ $crate::print_macro!($crate::print::format_strings::DEBUG, false, $($arg)*)
+ }
+ )
+);
+
+/// Continues a previous log message in the same line.
+///
+/// Use only when continuing a previous `pr_*!` macro (e.g. [`pr_info!`]).
+///
+/// Equivalent to the kernel's [`pr_cont`] macro.
+///
+/// Mimics the interface of [`std::print!`]. See [`core::fmt`] and
+/// [`alloc::format!`] for information about the formatting syntax.
+///
+/// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont
+/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::pr_cont;
+/// pr_info!("hello");
+/// pr_cont!(" {}\n", "there");
+/// ```
+#[macro_export]
+macro_rules! pr_cont (
+ ($($arg:tt)*) => (
+ $crate::print_macro!($crate::print::format_strings::CONT, true, $($arg)*)
+ )
+);
diff --git a/rust/kernel/random.rs b/rust/kernel/random.rs
new file mode 100644
index 000000000000..c30dd675d6a0
--- /dev/null
+++ b/rust/kernel/random.rs
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Random numbers.
+//!
+//! C header: [`include/linux/random.h`](../../../../include/linux/random.h)
+
+use crate::{bindings, error::code::*, Error, Result};
+
+/// Fills a byte slice with random bytes generated from the kernel's CSPRNG.
+///
+/// Ensures that the CSPRNG has been seeded before generating any random bytes,
+/// and will block until it is ready.
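+///
+/// # Examples
+///
+/// A minimal sketch that fills a small stack buffer; the `Result`-returning wrapper function
+/// exists only so that `?` can be used and is illustrative.
+///
+/// ```ignore
+/// fn random_bytes() -> Result {
+///     let mut buf = [0u8; 16];
+///     kernel::random::getrandom(&mut buf)?;
+///     Ok(())
+/// }
+/// ```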
+pub fn getrandom(dest: &mut [u8]) -> Result {
+ let res = unsafe { bindings::wait_for_random_bytes() };
+ if res != 0 {
+ return Err(Error::from_kernel_errno(res));
+ }
+
+ unsafe {
+ bindings::get_random_bytes(dest.as_mut_ptr() as *mut core::ffi::c_void, dest.len());
+ }
+ Ok(())
+}
+
+/// Fills a byte slice with random bytes generated from the kernel's CSPRNG.
+///
+/// If the CSPRNG is not yet seeded, returns an `Err(EAGAIN)` immediately.
+pub fn getrandom_nonblock(dest: &mut [u8]) -> Result {
+ if !unsafe { bindings::rng_is_initialized() } {
+ return Err(EAGAIN);
+ }
+ getrandom(dest)
+}
+
+/// Contributes the contents of a byte slice to the kernel's entropy pool.
+///
+/// Does *not* credit the kernel entropy counter though.
+pub fn add_randomness(data: &[u8]) {
+ unsafe {
+ bindings::add_device_randomness(data.as_ptr() as *const core::ffi::c_void, data.len());
+ }
+}
diff --git a/rust/kernel/raw_list.rs b/rust/kernel/raw_list.rs
new file mode 100644
index 000000000000..267b21709c29
--- /dev/null
+++ b/rust/kernel/raw_list.rs
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Raw lists.
+//!
+//! TODO: This module is a work in progress.
+
+use core::{
+ cell::UnsafeCell,
+ ptr,
+ ptr::NonNull,
+ sync::atomic::{AtomicBool, Ordering},
+};
+
+/// A descriptor of list elements.
+///
+/// It describes the type of list elements and provides a function to determine how to get the
+/// links to be used on a list.
+///
+/// A type that may be in multiple lists simultaneously needs to implement one of these for each
+/// simultaneous list.
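+///
+/// # Examples
+///
+/// A minimal sketch of an entry type that embeds its own [`Links`] and exposes them through
+/// [`GetLinks`]; the type and field names are illustrative.
+///
+/// ```ignore
+/// use kernel::raw_list::{GetLinks, Links};
+///
+/// struct Entry {
+///     value: u32,
+///     links: Links<Entry>,
+/// }
+///
+/// impl GetLinks for Entry {
+///     type EntryType = Entry;
+///
+///     fn get_links(entry: &Entry) -> &Links<Entry> {
+///         &entry.links
+///     }
+/// }
+/// ```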
+pub trait GetLinks {
+ /// The type of the entries in the list.
+ type EntryType: ?Sized;
+
+ /// Returns the links to be used when linking an entry within a list.
+ fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType>;
+}
+
+/// The links used to link an object on a linked list.
+///
+/// Instances of this type are usually embedded in structures and returned in calls to
+/// [`GetLinks::get_links`].
+pub struct Links<T: ?Sized> {
+ inserted: AtomicBool,
+ entry: UnsafeCell<ListEntry<T>>,
+}
+
+impl<T: ?Sized> Links<T> {
+ /// Constructs a new [`Links`] instance that isn't inserted on any lists yet.
+ pub fn new() -> Self {
+ Self {
+ inserted: AtomicBool::new(false),
+ entry: UnsafeCell::new(ListEntry::new()),
+ }
+ }
+
+ fn acquire_for_insertion(&self) -> bool {
+ self.inserted
+ .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed)
+ .is_ok()
+ }
+
+ fn release_after_removal(&self) {
+ self.inserted.store(false, Ordering::Release);
+ }
+}
+
+impl<T: ?Sized> Default for Links<T> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+struct ListEntry<T: ?Sized> {
+ next: Option<NonNull<T>>,
+ prev: Option<NonNull<T>>,
+}
+
+impl<T: ?Sized> ListEntry<T> {
+ fn new() -> Self {
+ Self {
+ next: None,
+ prev: None,
+ }
+ }
+}
+
+/// A linked list.
+///
+/// # Invariants
+///
+/// The links of objects added to a list are owned by the list.
+pub(crate) struct RawList<G: GetLinks> {
+ head: Option<NonNull<G::EntryType>>,
+}
+
+impl<G: GetLinks> RawList<G> {
+ pub(crate) fn new() -> Self {
+ Self { head: None }
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ self.head.is_none()
+ }
+
+ fn insert_after_priv(
+ &mut self,
+ existing: &G::EntryType,
+ new_entry: &mut ListEntry<G::EntryType>,
+ new_ptr: Option<NonNull<G::EntryType>>,
+ ) {
+ {
+ // SAFETY: It's safe to get the previous entry of `existing` because the list cannot
+ // change.
+ let existing_links = unsafe { &mut *G::get_links(existing).entry.get() };
+ new_entry.next = existing_links.next;
+ existing_links.next = new_ptr;
+ }
+
+ new_entry.prev = Some(NonNull::from(existing));
+
+ // SAFETY: It's safe to get the next entry of `existing` because the list cannot change.
+ let next_links =
+ unsafe { &mut *G::get_links(new_entry.next.unwrap().as_ref()).entry.get() };
+ next_links.prev = new_ptr;
+ }
+
+ /// Inserts the given object after `existing`.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `existing` points to a valid entry that is on the list.
+ pub(crate) unsafe fn insert_after(
+ &mut self,
+ existing: &G::EntryType,
+ new: &G::EntryType,
+ ) -> bool {
+ let links = G::get_links(new);
+ if !links.acquire_for_insertion() {
+ // Nothing to do if already inserted.
+ return false;
+ }
+
+ // SAFETY: The links are now owned by the list, so it is safe to get a mutable reference.
+ let new_entry = unsafe { &mut *links.entry.get() };
+ self.insert_after_priv(existing, new_entry, Some(NonNull::from(new)));
+ true
+ }
+
+ fn push_back_internal(&mut self, new: &G::EntryType) -> bool {
+ let links = G::get_links(new);
+ if !links.acquire_for_insertion() {
+ // Nothing to do if already inserted.
+ return false;
+ }
+
+ // SAFETY: The links are now owned by the list, so it is safe to get a mutable reference.
+ let new_entry = unsafe { &mut *links.entry.get() };
+ let new_ptr = Some(NonNull::from(new));
+ match self.back() {
+ // SAFETY: `back` is valid as the list cannot change.
+ Some(back) => self.insert_after_priv(unsafe { back.as_ref() }, new_entry, new_ptr),
+ None => {
+ self.head = new_ptr;
+ new_entry.next = new_ptr;
+ new_entry.prev = new_ptr;
+ }
+ }
+ true
+ }
+
+ pub(crate) unsafe fn push_back(&mut self, new: &G::EntryType) -> bool {
+ self.push_back_internal(new)
+ }
+
+ fn remove_internal(&mut self, data: &G::EntryType) -> bool {
+ let links = G::get_links(data);
+
+ // SAFETY: The links are now owned by the list, so it is safe to get a mutable reference.
+ let entry = unsafe { &mut *links.entry.get() };
+ let next = if let Some(next) = entry.next {
+ next
+ } else {
+ // Nothing to do if the entry is not on the list.
+ return false;
+ };
+
+ if ptr::eq(data, next.as_ptr()) {
+ // We're removing the only element.
+ self.head = None
+ } else {
+ // Update the head if we're removing it.
+ if let Some(raw_head) = self.head {
+ if ptr::eq(data, raw_head.as_ptr()) {
+ self.head = Some(next);
+ }
+ }
+
+ // SAFETY: It's safe to get the previous entry because the list cannot change.
+ unsafe { &mut *G::get_links(entry.prev.unwrap().as_ref()).entry.get() }.next =
+ entry.next;
+
+ // SAFETY: It's safe to get the next entry because the list cannot change.
+ unsafe { &mut *G::get_links(next.as_ref()).entry.get() }.prev = entry.prev;
+ }
+
+ // Reset the links of the element we're removing so that we know it's not on any list.
+ entry.next = None;
+ entry.prev = None;
+ links.release_after_removal();
+ true
+ }
+
+ /// Removes the given entry.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `data` is either on this list or in no list. It being on another
+ /// list leads to memory unsafety.
+ pub(crate) unsafe fn remove(&mut self, data: &G::EntryType) -> bool {
+ self.remove_internal(data)
+ }
+
+ fn pop_front_internal(&mut self) -> Option<NonNull<G::EntryType>> {
+ let head = self.head?;
+ // SAFETY: The head is on the list as we just got it from there and it cannot change.
+ unsafe { self.remove(head.as_ref()) };
+ Some(head)
+ }
+
+ pub(crate) fn pop_front(&mut self) -> Option<NonNull<G::EntryType>> {
+ self.pop_front_internal()
+ }
+
+ pub(crate) fn front(&self) -> Option<NonNull<G::EntryType>> {
+ self.head
+ }
+
+ pub(crate) fn back(&self) -> Option<NonNull<G::EntryType>> {
+ // SAFETY: The links of head are owned by the list, so it is safe to get a reference.
+ unsafe { &*G::get_links(self.head?.as_ref()).entry.get() }.prev
+ }
+
+ pub(crate) fn cursor_front(&self) -> Cursor<'_, G> {
+ Cursor::new(self, self.front())
+ }
+
+ pub(crate) fn cursor_front_mut(&mut self) -> CursorMut<'_, G> {
+ CursorMut::new(self, self.front())
+ }
+}
+
+struct CommonCursor<G: GetLinks> {
+ cur: Option<NonNull<G::EntryType>>,
+}
+
+impl<G: GetLinks> CommonCursor<G> {
+ fn new(cur: Option<NonNull<G::EntryType>>) -> Self {
+ Self { cur }
+ }
+
+ fn move_next(&mut self, list: &RawList<G>) {
+ match self.cur.take() {
+ None => self.cur = list.head,
+ Some(cur) => {
+ if let Some(head) = list.head {
+ // SAFETY: We have a shared ref to the linked list, so the links can't change.
+ let links = unsafe { &*G::get_links(cur.as_ref()).entry.get() };
+ if links.next.unwrap() != head {
+ self.cur = links.next;
+ }
+ }
+ }
+ }
+ }
+
+ fn move_prev(&mut self, list: &RawList<G>) {
+ match list.head {
+ None => self.cur = None,
+ Some(head) => {
+ let next = match self.cur.take() {
+ None => head,
+ Some(cur) => {
+ if cur == head {
+ return;
+ }
+ cur
+ }
+ };
+ // SAFETY: There's a shared ref to the list, so the links can't change.
+ let links = unsafe { &*G::get_links(next.as_ref()).entry.get() };
+ self.cur = links.prev;
+ }
+ }
+ }
+}
+
+/// A list cursor that allows traversing a linked list and inspecting elements.
+pub struct Cursor<'a, G: GetLinks> {
+ cursor: CommonCursor<G>,
+ list: &'a RawList<G>,
+}
+
+impl<'a, G: GetLinks> Cursor<'a, G> {
+ fn new(list: &'a RawList<G>, cur: Option<NonNull<G::EntryType>>) -> Self {
+ Self {
+ list,
+ cursor: CommonCursor::new(cur),
+ }
+ }
+
+ /// Returns the element the cursor is currently positioned on.
+ pub fn current(&self) -> Option<&'a G::EntryType> {
+ let cur = self.cursor.cur?;
+ // SAFETY: Objects must be kept alive while on the list.
+ Some(unsafe { &*cur.as_ptr() })
+ }
+
+ /// Moves the cursor to the next element.
+ pub fn move_next(&mut self) {
+ self.cursor.move_next(self.list);
+ }
+}
+
+pub(crate) struct CursorMut<'a, G: GetLinks> {
+ cursor: CommonCursor<G>,
+ list: &'a mut RawList<G>,
+}
+
+impl<'a, G: GetLinks> CursorMut<'a, G> {
+ fn new(list: &'a mut RawList<G>, cur: Option<NonNull<G::EntryType>>) -> Self {
+ Self {
+ list,
+ cursor: CommonCursor::new(cur),
+ }
+ }
+
+ pub(crate) fn current(&mut self) -> Option<&mut G::EntryType> {
+ let cur = self.cursor.cur?;
+ // SAFETY: Objects must be kept alive while on the list.
+ Some(unsafe { &mut *cur.as_ptr() })
+ }
+
+ /// Removes the entry the cursor is pointing to and advances the cursor to the next entry. It
+ /// returns a raw pointer to the removed element (if one is removed).
+ pub(crate) fn remove_current(&mut self) -> Option<NonNull<G::EntryType>> {
+ let entry = self.cursor.cur?;
+ self.cursor.move_next(self.list);
+ // SAFETY: The entry is on the list as we just got it from there and it cannot change.
+ unsafe { self.list.remove(entry.as_ref()) };
+ Some(entry)
+ }
+
+ pub(crate) fn peek_next(&mut self) -> Option<&mut G::EntryType> {
+ let mut new = CommonCursor::new(self.cursor.cur);
+ new.move_next(self.list);
+ // SAFETY: Objects must be kept alive while on the list.
+ Some(unsafe { &mut *new.cur?.as_ptr() })
+ }
+
+ pub(crate) fn peek_prev(&mut self) -> Option<&mut G::EntryType> {
+ let mut new = CommonCursor::new(self.cursor.cur);
+ new.move_prev(self.list);
+ // SAFETY: Objects must be kept alive while on the list.
+ Some(unsafe { &mut *new.cur?.as_ptr() })
+ }
+
+ pub(crate) fn move_next(&mut self) {
+ self.cursor.move_next(self.list);
+ }
+}
diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs
new file mode 100644
index 000000000000..a30739cc6839
--- /dev/null
+++ b/rust/kernel/rbtree.rs
@@ -0,0 +1,563 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Red-black trees.
+//!
+//! C header: [`include/linux/rbtree.h`](../../../../include/linux/rbtree.h)
+//!
+//! Reference: <https://www.kernel.org/doc/html/latest/core-api/rbtree.html>
+
+use crate::{bindings, Result};
+use alloc::boxed::Box;
+use core::{
+ cmp::{Ord, Ordering},
+ iter::{IntoIterator, Iterator},
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ptr::{addr_of_mut, NonNull},
+};
+
+struct Node<K, V> {
+ links: bindings::rb_node,
+ key: K,
+ value: V,
+}
+
+/// A red-black tree with owned nodes.
+///
+/// It is backed by the kernel C red-black trees.
+///
+/// # Invariants
+///
+/// Non-null parent/children pointers stored in instances of the `rb_node` C struct are always
+/// valid, and pointing to a field of our internal representation of a node.
+///
+/// # Examples
+///
+/// In the example below we do several operations on a tree. We note that insertions may fail if
+/// the system is out of memory.
+///
+/// ```
+/// use kernel::rbtree::RBTree;
+///
+/// # fn test() -> Result {
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_insert(20, 200)?;
+/// tree.try_insert(10, 100)?;
+/// tree.try_insert(30, 300)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Print all elements.
+/// for (key, value) in &tree {
+/// pr_info!("{} = {}\n", key, value);
+/// }
+///
+/// // Replace one of the elements.
+/// tree.try_insert(10, 1000)?;
+///
+/// // Check that the tree reflects the replacement.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Change the value of one of the elements.
+/// *tree.get_mut(&30).unwrap() = 3000;
+///
+/// // Check that the tree reflects the update.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &1000));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove an element.
+/// tree.remove(&10);
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &3000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Update all values.
+/// for value in tree.values_mut() {
+/// *value *= 10;
+/// }
+///
+/// // Check that the tree reflects the changes to values.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&20, &2000));
+/// assert_eq!(iter.next().unwrap(), (&30, &30000));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok(())
+/// # }
+/// #
+/// # assert_eq!(test(), Ok(()));
+/// ```
+///
+/// In the example below, we first allocate a node, acquire a spinlock, then insert the node into
+/// the tree. This is useful when the insertion context does not allow sleeping, for example, when
+/// holding a spinlock.
+///
+/// ```
+/// use kernel::{rbtree::RBTree, sync::SpinLock};
+///
+/// fn insert_test(tree: &SpinLock<RBTree<u32, u32>>) -> Result {
+/// // Pre-allocate node. This may fail (as it allocates memory).
+/// let node = RBTree::try_allocate_node(10, 100)?;
+///
+/// // Insert node while holding the lock. It is guaranteed to succeed with no allocation
+/// // attempts.
+/// let mut guard = tree.lock();
+/// guard.insert(node);
+/// Ok(())
+/// }
+/// ```
+///
+/// In the example below, we reuse an existing node allocation from an element we removed.
+///
+/// ```
+/// use kernel::rbtree::RBTree;
+///
+/// # fn test() -> Result {
+/// // Create a new tree.
+/// let mut tree = RBTree::new();
+///
+/// // Insert three elements.
+/// tree.try_insert(20, 200)?;
+/// tree.try_insert(10, 100)?;
+/// tree.try_insert(30, 300)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert_eq!(iter.next().unwrap(), (&30, &300));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Remove a node, getting back ownership of it.
+/// let existing = tree.remove_node(&30).unwrap();
+///
+/// // Check that the tree reflects the removal.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// // Turn the node into a reservation so that we can reuse it with a different key/value.
+/// let reservation = existing.into_reservation();
+///
+/// // Insert a new node into the tree, reusing the previous allocation. This is guaranteed to
+/// // succeed (no memory allocations).
+/// tree.insert(reservation.into_node(15, 150));
+///
+/// // Check that the tree reflects the new insertion.
+/// {
+/// let mut iter = tree.iter();
+/// assert_eq!(iter.next().unwrap(), (&10, &100));
+/// assert_eq!(iter.next().unwrap(), (&15, &150));
+/// assert_eq!(iter.next().unwrap(), (&20, &200));
+/// assert!(iter.next().is_none());
+/// }
+///
+/// # Ok(())
+/// # }
+/// #
+/// # assert_eq!(test(), Ok(()));
+/// ```
+pub struct RBTree<K, V> {
+ root: bindings::rb_root,
+ _p: PhantomData<Node<K, V>>,
+}
+
+impl<K, V> RBTree<K, V> {
+ /// Creates a new and empty tree.
+ pub fn new() -> Self {
+ Self {
+ // INVARIANT: There are no nodes in the tree, so the invariant holds vacuously.
+ root: bindings::rb_root::default(),
+ _p: PhantomData,
+ }
+ }
+
+ /// Tries to insert a new value into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// Returns an error if it cannot allocate memory for the new node.
+ pub fn try_insert(&mut self, key: K, value: V) -> Result<Option<RBTreeNode<K, V>>>
+ where
+ K: Ord,
+ {
+ Ok(self.insert(Self::try_allocate_node(key, value)?))
+ }
+
+ /// Allocates memory for a node to be eventually initialised and inserted into the tree via a
+ /// call to [`RBTree::insert`].
+ pub fn try_reserve_node() -> Result<RBTreeNodeReservation<K, V>> {
+ Ok(RBTreeNodeReservation {
+ node: Box::try_new(MaybeUninit::uninit())?,
+ })
+ }
+
+ /// Allocates and initialises a node that can be inserted into the tree via
+ /// [`RBTree::insert`].
+ pub fn try_allocate_node(key: K, value: V) -> Result<RBTreeNode<K, V>> {
+ Ok(Self::try_reserve_node()?.into_node(key, value))
+ }
+
+ /// Inserts a new node into the tree.
+ ///
+ /// It overwrites a node if one already exists with the same key and returns it (containing the
+ /// key/value pair). Returns [`None`] if a node with the same key didn't already exist.
+ ///
+ /// This function always succeeds.
+ pub fn insert(&mut self, node: RBTreeNode<K, V>) -> Option<RBTreeNode<K, V>>
+ where
+ K: Ord,
+ {
+ let RBTreeNode { node } = node;
+ let node = Box::into_raw(node);
+ // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when
+ // the node is removed or replaced.
+ let node_links = unsafe { addr_of_mut!((*node).links) };
+ let mut new_link: &mut *mut bindings::rb_node = &mut self.root.rb_node;
+ let mut parent = core::ptr::null_mut();
+ while !new_link.is_null() {
+ let this = crate::container_of!(*new_link, Node<K, V>, links);
+
+ parent = *new_link;
+
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants. `node` is
+ // valid until the node is removed.
+ match unsafe { (*node).key.cmp(&(*this).key) } {
+ // SAFETY: `parent` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => new_link = unsafe { &mut (*parent).rb_left },
+ // SAFETY: `parent` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => new_link = unsafe { &mut (*parent).rb_right },
+ Ordering::Equal => {
+ // INVARIANT: We are replacing an existing node with a new one, which is valid.
+ // It remains valid because we "forgot" it with `Box::into_raw`.
+ // SAFETY: All pointers are non-null and valid (parent, despite the name, really
+ // is the node we're replacing).
+ unsafe { bindings::rb_replace_node(parent, node_links, &mut self.root) };
+
+ // INVARIANT: The node is being returned and the caller may free it, however,
+ // it was removed from the tree. So the invariants still hold.
+ return Some(RBTreeNode {
+ // SAFETY: `this` was a node in the tree, so it is valid.
+ node: unsafe { Box::from_raw(this as _) },
+ });
+ }
+ }
+ }
+
+ // INVARIANT: We are linking in a new node, which is valid. It remains valid because we
+ // "forgot" it with `Box::into_raw`.
+ // SAFETY: All pointers are non-null and valid (`*new_link` is null, but `new_link` is a
+ // mutable reference).
+ unsafe { bindings::rb_link_node(node_links, parent, new_link) };
+
+ // SAFETY: All pointers are valid. `node` has just been inserted into the tree.
+ unsafe { bindings::rb_insert_color(node_links, &mut self.root) };
+ None
+ }
+
+ /// Returns a node with the given key, if one exists.
+ fn find(&self, key: &K) -> Option<NonNull<Node<K, V>>>
+ where
+ K: Ord,
+ {
+ let mut node = self.root.rb_node;
+ while !node.is_null() {
+ let this = crate::container_of!(node, Node<K, V>, links);
+ // SAFETY: `this` is a non-null node so it is valid by the type invariants.
+ node = match key.cmp(unsafe { &(*this).key }) {
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Less => unsafe { (*node).rb_left },
+ // SAFETY: `node` is a non-null node so it is valid by the type invariants.
+ Ordering::Greater => unsafe { (*node).rb_right },
+ Ordering::Equal => return NonNull::new(this as _),
+ }
+ }
+ None
+ }
+
+ /// Returns a reference to the value corresponding to the key.
+ pub fn get(&self, key: &K) -> Option<&V>
+ where
+ K: Ord,
+ {
+ // SAFETY: The `find` return value is a node in the tree, so it is valid.
+ self.find(key).map(|node| unsafe { &node.as_ref().value })
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ pub fn get_mut(&mut self, key: &K) -> Option<&mut V>
+ where
+ K: Ord,
+ {
+ // SAFETY: The `find` return value is a node in the tree, so it is valid.
+ self.find(key)
+ .map(|mut node| unsafe { &mut node.as_mut().value })
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the node that was removed if one exists, or [`None`] otherwise.
+ pub fn remove_node(&mut self, key: &K) -> Option<RBTreeNode<K, V>>
+ where
+ K: Ord,
+ {
+ let mut node = self.find(key)?;
+
+ // SAFETY: The `find` return value is a node in the tree, so it is valid.
+ unsafe { bindings::rb_erase(&mut node.as_mut().links, &mut self.root) };
+
+ // INVARIANT: The node is being returned and the caller may free it; however, it was
+ // removed from the tree, so the invariants still hold.
+ Some(RBTreeNode {
+ // SAFETY: The `find` return value was a node in the tree, so it is valid.
+ node: unsafe { Box::from_raw(node.as_ptr()) },
+ })
+ }
+
+ /// Removes the node with the given key from the tree.
+ ///
+ /// It returns the value that was removed if one exists, or [`None`] otherwise.
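+ ///
+ /// # Examples
+ ///
+ /// A sketch (not part of the original patch), reusing the hypothetical insertion call from the
+ /// [`RBTree::get`] example above:
+ ///
+ /// ```ignore
+ /// tree.try_create_and_insert(20u64, 200u64)?;
+ /// assert_eq!(tree.remove(&20), Some(200));
+ /// assert_eq!(tree.remove(&20), None);
+ /// ```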
+ pub fn remove(&mut self, key: &K) -> Option<V>
+ where
+ K: Ord,
+ {
+ let node = self.remove_node(key)?;
+ let RBTreeNode { node } = node;
+ let Node {
+ links: _,
+ key: _,
+ value,
+ } = *node;
+ Some(value)
+ }
+
+ /// Returns an iterator over the tree nodes, sorted by key.
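+ ///
+ /// # Examples
+ ///
+ /// A sketch (not part of the original patch), again using the hypothetical insertion call from
+ /// the [`RBTree::get`] example:
+ ///
+ /// ```ignore
+ /// tree.try_create_and_insert(10u64, 100u64)?;
+ /// tree.try_create_and_insert(20u64, 200u64)?;
+ /// let mut iter = tree.iter();
+ /// assert_eq!(iter.next(), Some((&10, &100)));
+ /// assert_eq!(iter.next(), Some((&20, &200)));
+ /// assert_eq!(iter.next(), None);
+ /// ```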
+ pub fn iter(&self) -> RBTreeIterator<'_, K, V> {
+ RBTreeIterator {
+ _tree: PhantomData,
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ next: unsafe { bindings::rb_first(&self.root) },
+ }
+ }
+
+ /// Returns a mutable iterator over the tree nodes, sorted by key.
+ pub fn iter_mut(&mut self) -> RBTreeIteratorMut<'_, K, V> {
+ RBTreeIteratorMut {
+ _tree: PhantomData,
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ next: unsafe { bindings::rb_first(&self.root) },
+ }
+ }
+
+ /// Returns an iterator over the keys of the nodes in the tree, in sorted order.
+ pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
+ self.iter().map(|(k, _)| k)
+ }
+
+ /// Returns an iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values(&self) -> impl Iterator<Item = &'_ V> {
+ self.iter().map(|(_, v)| v)
+ }
+
+ /// Returns a mutable iterator over the values of the nodes in the tree, sorted by key.
+ pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
+ self.iter_mut().map(|(_, v)| v)
+ }
+}
+
+impl<K, V> Default for RBTree<K, V> {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<K, V> Drop for RBTree<K, V> {
+ fn drop(&mut self) {
+ // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`.
+ let mut next = unsafe { bindings::rb_first_postorder(&self.root) };
+
+ // INVARIANT: The loop invariant is that all tree nodes from `next` in postorder are valid.
+ while !next.is_null() {
+ let this = crate::container_of!(next, Node<K, V>, links);
+
+ // Find out what the next node is before disposing of the current one.
+ // SAFETY: `next` and all nodes in postorder are still valid.
+ next = unsafe { bindings::rb_next_postorder(next) };
+
+ // INVARIANT: This is the destructor, so we break the type invariant during clean-up,
+ // but it is not observable. The loop invariant is still maintained.
+ // SAFETY: `this` is valid per the loop invariant.
+ unsafe { Box::from_raw(this as *mut Node<K, V>) };
+ }
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a RBTree<K, V> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = RBTreeIterator<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+/// An iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter`].
+pub struct RBTreeIterator<'a, K, V> {
+ _tree: PhantomData<&'a RBTree<K, V>>,
+ next: *mut bindings::rb_node,
+}
+
+impl<'a, K, V> Iterator for RBTreeIterator<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ let cur = crate::container_of!(self.next, Node<K, V>, links);
+
+ // SAFETY: The reference to the tree used to create the iterator outlives the iterator, so
+ // the tree cannot change. By the tree invariant, all nodes are valid.
+ self.next = unsafe { bindings::rb_next(self.next) };
+
+ // SAFETY: By the same reasoning above, it is safe to dereference the node. Additionally,
+ // it is ok to return a reference to members because the iterator must outlive it.
+ Some(unsafe { (&(*cur).key, &(*cur).value) })
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut RBTree<K, V> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = RBTreeIteratorMut<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+/// A mutable iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter_mut`].
+pub struct RBTreeIteratorMut<'a, K, V> {
+ _tree: PhantomData<&'a RBTree<K, V>>,
+ next: *mut bindings::rb_node,
+}
+
+impl<'a, K, V> Iterator for RBTreeIteratorMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.next.is_null() {
+ return None;
+ }
+
+ let cur = crate::container_of!(self.next, Node<K, V>, links) as *mut Node<K, V>;
+
+ // SAFETY: The reference to the tree used to create the iterator outlives the iterator, so
+ // the tree cannot change (except for the value of previous nodes, but those don't affect
+ // the iteration process). By the tree invariant, all nodes are valid.
+ self.next = unsafe { bindings::rb_next(self.next) };
+
+ // SAFETY: By the same reasoning above, it is safe to dereference the node. Additionally,
+ // it is ok to return a reference to members because the iterator must outlive it.
+ Some(unsafe { (&(*cur).key, &mut (*cur).value) })
+ }
+}
+
+/// A memory reservation for a red-black tree node.
+///
+/// It contains the memory needed to hold a node that can be inserted into a red-black tree. One
+/// can be obtained by directly allocating it ([`RBTree::try_reserve_node`]) or by "uninitialising"
+/// ([`RBTreeNode::into_reservation`]) an actual node (usually returned by some operation like
+/// removal from a tree).
+pub struct RBTreeNodeReservation<K, V> {
+ node: Box<MaybeUninit<Node<K, V>>>,
+}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+ /// Initialises a node reservation.
+ ///
+ /// It then becomes an [`RBTreeNode`] that can be inserted into a tree.
+ pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> {
+ let node_ptr = self.node.as_mut_ptr();
+ // SAFETY: `node_ptr` is valid, and so are its fields.
+ unsafe { addr_of_mut!((*node_ptr).links).write(bindings::rb_node::default()) };
+ // SAFETY: `node_ptr` is valid, and so are its fields.
+ unsafe { addr_of_mut!((*node_ptr).key).write(key) };
+ // SAFETY: `node_ptr` is valid, and so are its fields.
+ unsafe { addr_of_mut!((*node_ptr).value).write(value) };
+ let raw = Box::into_raw(self.node);
+ RBTreeNode {
+ // SAFETY: The pointer came from a `MaybeUninit<Node>` whose fields have all been
+ // initialised. Additionally, it has the same layout as `Node`.
+ node: unsafe { Box::from_raw(raw as _) },
+ }
+ }
+}
+
+/// A red-black tree node.
+///
+/// The node is fully initialised (with key and value) and can be inserted into a tree without any
+/// extra allocations or failure paths.
+pub struct RBTreeNode<K, V> {
+ node: Box<Node<K, V>>,
+}
+
+impl<K, V> RBTreeNode<K, V> {
+ /// "Uninitialises" a node.
+ ///
+ /// It then becomes a reservation that can be re-initialised into a different node (i.e., with
+ /// a different key and/or value).
+ ///
+ /// The existing key and value are dropped in-place as part of this operation, that is, memory
+ /// may be freed (but only for the key/value; memory for the node itself is kept for reuse).
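+ ///
+ /// # Examples
+ ///
+ /// A round-trip sketch (not part of the original patch), assuming the reservation is obtained
+ /// via [`RBTree::try_reserve_node`] (exact signature not shown here):
+ ///
+ /// ```ignore
+ /// // Allocate the memory for a node up front; this is the only step that can fail.
+ /// let reservation = RBTree::<u32, u32>::try_reserve_node()?;
+ /// // Initialise it into a node; this step cannot fail or allocate.
+ /// let node = reservation.into_node(10, 100);
+ /// // Turn it back into a reservation, dropping the key and value but keeping the memory.
+ /// let reservation = node.into_reservation();
+ /// // The memory can now be reused with a different key and value.
+ /// let _node = reservation.into_node(20, 200);
+ /// ```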
+ pub fn into_reservation(self) -> RBTreeNodeReservation<K, V> {
+ let raw = Box::into_raw(self.node);
+ let mut ret = RBTreeNodeReservation {
+ // SAFETY: The pointer came from a valid `Node`, which has the same layout as
+ // `MaybeUninit<Node>`.
+ node: unsafe { Box::from_raw(raw as _) },
+ };
+ // SAFETY: Although the type is `MaybeUninit<Node>`, we know it has been initialised
+ // because it came from a `Node`. So it is safe to drop it.
+ unsafe { core::ptr::drop_in_place(ret.node.as_mut_ptr()) };
+ ret
+ }
+}
diff --git a/rust/kernel/revocable.rs b/rust/kernel/revocable.rs
new file mode 100644
index 000000000000..1093c4d26026
--- /dev/null
+++ b/rust/kernel/revocable.rs
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Revocable objects.
+//!
+//! The [`Revocable`] type wraps other types and allows access to them to be revoked. The existence
+//! of a [`RevocableGuard`] ensures that objects remain valid.
+
+use crate::{bindings, sync::rcu};
+use core::{
+ cell::UnsafeCell,
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ops::Deref,
+ ptr::drop_in_place,
+ sync::atomic::{fence, AtomicBool, AtomicU32, Ordering},
+};
+
+/// An object that can become inaccessible at runtime.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`RevocableGuard`] are dropped), the wrapped object is also dropped.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+/// let guard = v.try_access()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// let v = Revocable::new(Example { a: 10, b: 20 });
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+///
+/// Same example as above, but explicitly using the RCU read side lock.
+///
+/// ```
+/// # use kernel::revocable::Revocable;
+/// use kernel::sync::rcu;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Example>) -> Option<u32> {
+/// let guard = rcu::read_lock();
+/// let e = v.try_access_with_guard(&guard)?;
+/// Some(e.a + e.b)
+/// }
+///
+/// let v = Revocable::new(Example { a: 10, b: 20 });
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+pub struct Revocable<T> {
+ is_available: AtomicBool,
+ data: MaybeUninit<UnsafeCell<T>>,
+}
+
+// SAFETY: `Revocable` is `Send` if the wrapped object is also `Send`. This is because while the
+// functionality exposed by `Revocable` can be accessed from any thread/CPU, it is possible that
+// this isn't supported by the wrapped object.
+unsafe impl<T: Send> Send for Revocable<T> {}
+
+// SAFETY: `Revocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require `Send`
+// from the wrapped object as well because of `Revocable::revoke`, which can trigger the `Drop`
+// implementation of the wrapped object from an arbitrary thread.
+unsafe impl<T: Sync + Send> Sync for Revocable<T> {}
+
+impl<T> Revocable<T> {
+ /// Creates a new revocable instance of the given data.
+ pub const fn new(data: T) -> Self {
+ Self {
+ is_available: AtomicBool::new(true),
+ data: MaybeUninit::new(UnsafeCell::new(data)),
+ }
+ }
+
+ /// Tries to access the \[revocable\] wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive. In such cases, callers are not allowed to sleep
+ /// because another CPU may be waiting to complete the revocation of this object.
+ pub fn try_access(&self) -> Option<RevocableGuard<'_, T>> {
+ let guard = rcu::read_lock();
+ if self.is_available.load(Ordering::Relaxed) {
+ // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+ // valid because the RCU read side lock prevents it from being dropped.
+ Some(unsafe { RevocableGuard::new(self.data.assume_init_ref().get(), guard) })
+ } else {
+ None
+ }
+ }
+
+ /// Tries to access the \[revocable\] wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a shared reference to the object otherwise; the object is guaranteed to
+ /// remain accessible while the rcu read side guard is alive. In such cases, callers are not
+ /// allowed to sleep because another CPU may be waiting to complete the revocation of this
+ /// object.
+ pub fn try_access_with_guard<'a>(&'a self, _guard: &'a rcu::Guard) -> Option<&'a T> {
+ if self.is_available.load(Ordering::Relaxed) {
+ // SAFETY: Since `self.is_available` is true, data is initialised and has to remain
+ // valid because the RCU read side lock prevents it from being dropped.
+ Some(unsafe { &*self.data.assume_init_ref().get() })
+ } else {
+ None
+ }
+ }
+
+ /// Revokes access to and drops the wrapped object.
+ ///
+ /// Access to the object is revoked immediately to new callers of [`Revocable::try_access`]. If
+ /// there are concurrent users of the object (i.e., ones that called [`Revocable::try_access`]
+ /// beforehand and still haven't dropped the returned guard), this function waits for the
+ /// concurrent access to complete before dropping the wrapped object.
+ pub fn revoke(&self) {
+ if self
+ .is_available
+ .compare_exchange(true, false, Ordering::Relaxed, Ordering::Relaxed)
+ .is_ok()
+ {
+ // SAFETY: Just an FFI call, there are no further requirements.
+ unsafe { bindings::synchronize_rcu() };
+
+ // SAFETY: We know `self.data` is valid because only one CPU can succeed the
+ // `compare_exchange` above that takes `is_available` from `true` to `false`.
+ unsafe { drop_in_place(self.data.assume_init_ref().get()) };
+ }
+ }
+}
+
+impl<T> Drop for Revocable<T> {
+ fn drop(&mut self) {
+ // Drop only if the data hasn't been revoked yet (in which case it has already been
+ // dropped).
+ if *self.is_available.get_mut() {
+ // SAFETY: We know `self.data` is valid because no other CPU has changed
+ // `is_available` to `false` yet, and no other CPU can do it anymore because this CPU
+ // holds the only reference (mutable) to `self` now.
+ unsafe { drop_in_place(self.data.assume_init_ref().get()) };
+ }
+ }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+///
+/// CPUs may not sleep while holding on to [`RevocableGuard`] because it's in atomic context
+/// holding the RCU read-side lock.
+///
+/// # Invariants
+///
+/// The RCU read-side lock is held while the guard is alive.
+pub struct RevocableGuard<'a, T> {
+ data_ref: *const T,
+ _rcu_guard: rcu::Guard,
+ _p: PhantomData<&'a ()>,
+}
+
+impl<T> RevocableGuard<'_, T> {
+ fn new(data_ref: *const T, rcu_guard: rcu::Guard) -> Self {
+ Self {
+ data_ref,
+ _rcu_guard: rcu_guard,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T> Deref for RevocableGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariants, we hold the rcu read-side lock, so the object is
+ // guaranteed to remain valid.
+ unsafe { &*self.data_ref }
+ }
+}
+
+/// An object that can become inaccessible at runtime.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`AsyncRevocableGuard`] are dropped), the wrapped object is also dropped.
+///
+/// Unlike [`Revocable`], [`AsyncRevocable`] does not wait for concurrent users of the wrapped
+/// object to finish before [`AsyncRevocable::revoke`] completes -- thus the async qualifier. This
+/// has the advantage of not requiring RCU locks or waits of any kind.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::revocable::AsyncRevocable;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &AsyncRevocable<Example>) -> Option<u32> {
+/// let guard = v.try_access()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// let v = AsyncRevocable::new(Example { a: 10, b: 20 });
+/// assert_eq!(add_two(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+///
+/// Example where revocation happens while there is a user:
+///
+/// ```
+/// # use kernel::revocable::AsyncRevocable;
+/// use core::sync::atomic::{AtomicBool, Ordering};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// static DROPPED: AtomicBool = AtomicBool::new(false);
+///
+/// impl Drop for Example {
+/// fn drop(&mut self) {
+/// DROPPED.store(true, Ordering::Relaxed);
+/// }
+/// }
+///
+/// fn add_two(v: &AsyncRevocable<Example>) -> Option<u32> {
+/// let guard = v.try_access()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// let v = AsyncRevocable::new(Example { a: 10, b: 20 });
+/// assert_eq!(add_two(&v), Some(30));
+///
+/// let guard = v.try_access().unwrap();
+/// assert!(!v.is_revoked());
+/// assert!(!DROPPED.load(Ordering::Relaxed));
+/// v.revoke();
+/// assert!(!DROPPED.load(Ordering::Relaxed));
+/// assert!(v.is_revoked());
+/// assert!(v.try_access().is_none());
+/// assert_eq!(guard.a + guard.b, 30);
+/// drop(guard);
+/// assert!(DROPPED.load(Ordering::Relaxed));
+/// ```
+pub struct AsyncRevocable<T> {
+ usage_count: AtomicU32,
+ data: MaybeUninit<UnsafeCell<T>>,
+}
+
+// SAFETY: `AsyncRevocable` is `Send` if the wrapped object is also `Send`. This is because while
+// the functionality exposed by `AsyncRevocable` can be accessed from any thread/CPU, it is
+// possible that this isn't supported by the wrapped object.
+unsafe impl<T: Send> Send for AsyncRevocable<T> {}
+
+// SAFETY: `AsyncRevocable` is `Sync` if the wrapped object is both `Send` and `Sync`. We require
+// `Send` from the wrapped object as well because of `AsyncRevocable::revoke`, which can trigger
+// the `Drop` implementation of the wrapped object from an arbitrary thread.
+unsafe impl<T: Sync + Send> Sync for AsyncRevocable<T> {}
+
+const REVOKED: u32 = 0x80000000;
+const COUNT_MASK: u32 = !REVOKED;
+const SATURATED_COUNT: u32 = REVOKED - 1;
+
+impl<T> AsyncRevocable<T> {
+ /// Creates a new asynchronously revocable instance of the given data.
+ pub fn new(data: T) -> Self {
+ Self {
+ usage_count: AtomicU32::new(0),
+ data: MaybeUninit::new(UnsafeCell::new(data)),
+ }
+ }
+
+ /// Tries to access the \[revocable\] wrapped object.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive.
+ pub fn try_access(&self) -> Option<AsyncRevocableGuard<'_, T>> {
+ loop {
+ let count = self.usage_count.load(Ordering::Relaxed);
+
+ // Fail the access attempt if the object is already revoked.
+ if count & REVOKED != 0 {
+ return None;
+ }
+
+ // No need to increment if the count is saturated.
+ if count == SATURATED_COUNT
+ || self
+ .usage_count
+ .compare_exchange(count, count + 1, Ordering::Relaxed, Ordering::Relaxed)
+ .is_ok()
+ {
+ return Some(AsyncRevocableGuard { revocable: self });
+ }
+ }
+ }
+
+ /// Revokes access to the protected object.
+ ///
+ /// Returns `true` if access has been revoked, or `false` when the object has already been
+ /// revoked by a previous call to [`AsyncRevocable::revoke`].
+ ///
+ /// This call is non-blocking, that is, no new users of the revocable object will be allowed,
+ /// but potential current users are able to continue to use it and the thread won't wait for
+ /// them to finish. In such cases, the object will be dropped when the last user completes.
+ pub fn revoke(&self) -> bool {
+ // Set the `REVOKED` bit.
+ //
+ // The acquire barrier matches up with the release when decrementing the usage count.
+ let prev = self.usage_count.fetch_or(REVOKED, Ordering::Acquire);
+ if prev & REVOKED != 0 {
+ // Another thread already revoked this object.
+ return false;
+ }
+
+ if prev == 0 {
+ // SAFETY: This thread just revoked the object and the usage count is zero, so the
+ // object is valid and there will be no future users.
+ unsafe { drop_in_place(UnsafeCell::raw_get(self.data.as_ptr())) };
+ }
+
+ true
+ }
+
+ /// Returns whether access to the object has been revoked.
+ pub fn is_revoked(&self) -> bool {
+ self.usage_count.load(Ordering::Relaxed) & REVOKED != 0
+ }
+}
+
+impl<T> Drop for AsyncRevocable<T> {
+ fn drop(&mut self) {
+ let count = *self.usage_count.get_mut();
+ if count != REVOKED {
+ // The object hasn't been dropped yet, so we do it now.
+
+ // This matches with the release when decrementing the usage count.
+ fence(Ordering::Acquire);
+
+ // SAFETY: Since `count` does not indicate a count of 0 with the REVOKED bit set, the
+ // object is still valid.
+ unsafe { drop_in_place(UnsafeCell::raw_get(self.data.as_ptr())) };
+ }
+ }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+///
+/// # Invariants
+///
+/// The owner owns an increment on the usage count (which may have saturated it), which keeps the
+/// revocable object alive.
+pub struct AsyncRevocableGuard<'a, T> {
+ revocable: &'a AsyncRevocable<T>,
+}
+
+impl<T> Deref for AsyncRevocableGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariants guarantee that the caller owns an increment.
+ unsafe { &*self.revocable.data.assume_init_ref().get() }
+ }
+}
+
+impl<T> Drop for AsyncRevocableGuard<'_, T> {
+ fn drop(&mut self) {
+ loop {
+ let count = self.revocable.usage_count.load(Ordering::Relaxed);
+ let actual_count = count & COUNT_MASK;
+ if actual_count == SATURATED_COUNT {
+ // The count is saturated, so we won't decrement (nor do we drop the object).
+ return;
+ }
+
+ if actual_count == 0 {
+ // Trying to underflow the count.
+ panic!("actual_count is zero");
+ }
+
+ // On success, we use release ordering, which matches with the acquire in one of the
+ // places where we drop the object, namely: below, in `AsyncRevocable::revoke`, or in
+ // `AsyncRevocable::drop`.
+ if self
+ .revocable
+ .usage_count
+ .compare_exchange(count, count - 1, Ordering::Release, Ordering::Relaxed)
+ .is_ok()
+ {
+ if count == 1 | REVOKED {
+ // `count` is now zero and it is revoked, so free it now.
+
+ // This matches with the release above (which may have happened in other
+ // threads concurrently).
+ fence(Ordering::Acquire);
+
+ // SAFETY: Since `count` was 1, the object is still alive.
+ unsafe { drop_in_place(UnsafeCell::raw_get(self.revocable.data.as_ptr())) };
+ }
+
+ return;
+ }
+ }
+ }
+}
diff --git a/rust/kernel/security.rs b/rust/kernel/security.rs
new file mode 100644
index 000000000000..0a33363289d3
--- /dev/null
+++ b/rust/kernel/security.rs
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Linux Security Modules (LSM).
+//!
+//! C header: [`include/linux/security.h`](../../../../include/linux/security.h).
+
+use crate::{bindings, cred::Credential, file::File, to_result, Result};
+
+/// Calls the security modules to determine if the given task can become the manager of a binder
+/// context.
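+///
+/// # Examples
+///
+/// A hypothetical caller (not part of the original patch) that simply forwards a credential to
+/// this LSM hook:
+///
+/// ```ignore
+/// fn become_context_manager(cred: &kernel::cred::Credential) -> kernel::Result {
+///     kernel::security::binder_set_context_mgr(cred)
+/// }
+/// ```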
+pub fn binder_set_context_mgr(mgr: &Credential) -> Result {
+ // SAFETY: `mgr.0` is valid because the shared reference guarantees a nonzero refcount.
+ to_result(unsafe { bindings::security_binder_set_context_mgr(mgr.0.get()) })
+}
+
+/// Calls the security modules to determine if binder transactions are allowed from task `from` to
+/// task `to`.
+pub fn binder_transaction(from: &Credential, to: &Credential) -> Result {
+ // SAFETY: `from` and `to` are valid because the shared references guarantee nonzero refcounts.
+ to_result(unsafe { bindings::security_binder_transaction(from.0.get(), to.0.get()) })
+}
+
+/// Calls the security modules to determine if task `from` is allowed to send binder objects
+/// (owned by itself or other processes) to task `to` through a binder transaction.
+pub fn binder_transfer_binder(from: &Credential, to: &Credential) -> Result {
+ // SAFETY: `from` and `to` are valid because the shared references guarantee nonzero refcounts.
+ to_result(unsafe { bindings::security_binder_transfer_binder(from.0.get(), to.0.get()) })
+}
+
+/// Calls the security modules to determine if task `from` is allowed to send the given file to
+/// task `to` (which would get its own file descriptor) through a binder transaction.
+pub fn binder_transfer_file(from: &Credential, to: &Credential, file: &File) -> Result {
+ // SAFETY: `from`, `to` and `file` are valid because the shared references guarantee nonzero
+ // refcounts.
+ to_result(unsafe {
+ bindings::security_binder_transfer_file(from.0.get(), to.0.get(), file.0.get())
+ })
+}
diff --git a/rust/kernel/static_assert.rs b/rust/kernel/static_assert.rs
new file mode 100644
index 000000000000..3115ee0ba8e9
--- /dev/null
+++ b/rust/kernel/static_assert.rs
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Static assert.
+
+/// Static assert (i.e. compile-time assert).
+///
+/// Similar to C11 [`_Static_assert`] and C++11 [`static_assert`].
+///
+/// The feature may be added to Rust in the future: see [RFC 2790].
+///
+/// [`_Static_assert`]: https://en.cppreference.com/w/c/language/_Static_assert
+/// [`static_assert`]: https://en.cppreference.com/w/cpp/language/static_assert
+/// [RFC 2790]: https://github.com/rust-lang/rfcs/issues/2790
+///
+/// # Examples
+///
+/// ```
+/// static_assert!(42 > 24);
+/// static_assert!(core::mem::size_of::<u8>() == 1);
+///
+/// const X: &[u8] = b"bar";
+/// static_assert!(X[1] == b'a');
+///
+/// const fn f(x: i32) -> i32 {
+/// x + 2
+/// }
+/// static_assert!(f(40) == 42);
+/// ```
+#[macro_export]
+macro_rules! static_assert {
+ ($condition:expr) => {
+ const _: () = core::assert!($condition);
+ };
+}
diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs
new file mode 100644
index 000000000000..ffbaca0c4cb7
--- /dev/null
+++ b/rust/kernel/std_vendor.rs
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: Apache-2.0 OR MIT
+
+//! The contents of this file come from the Rust standard library, hosted in
+//! the <https://github.com/rust-lang/rust> repository, licensed under
+//! "Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
+//! see <https://github.com/rust-lang/rust/blob/master/COPYRIGHT>.
+
+/// [`std::dbg`], but using [`pr_info`] instead of [`eprintln`].
+///
+/// Prints and returns the value of a given expression for quick and dirty
+/// debugging.
+///
+/// An example:
+///
+/// ```rust
+/// let a = 2;
+/// # #[allow(clippy::dbg_macro)]
+/// let b = dbg!(a * 2) + 1;
+/// // ^-- prints: [src/main.rs:2] a * 2 = 4
+/// assert_eq!(b, 5);
+/// ```
+///
+/// The macro works by using the `Debug` implementation of the type of
+/// the given expression to print the value with [`printk`] along with the
+/// source location of the macro invocation as well as the source code
+/// of the expression.
+///
+/// Invoking the macro on an expression moves and takes ownership of it
+/// before returning the evaluated expression unchanged. If the type
+/// of the expression does not implement `Copy` and you don't want
+/// to give up ownership, you can instead borrow with `dbg!(&expr)`
+/// for some expression `expr`.
+///
+/// The `dbg!` macro works exactly the same in release builds.
+/// This is useful when debugging issues that only occur in release
+/// builds or when debugging in release mode is significantly faster.
+///
+/// Note that the macro is intended as a debugging tool and therefore you
+/// should avoid having uses of it in version control for long periods
+/// (other than in tests and similar).
+///
+/// # Stability
+///
+/// The exact output printed by this macro should not be relied upon
+/// and is subject to future changes.
+///
+/// # Further examples
+///
+/// With a method call:
+///
+/// ```rust
+/// # #[allow(clippy::dbg_macro)]
+/// fn foo(n: usize) {
+/// if dbg!(n.checked_sub(4)).is_some() {
+/// // ...
+/// }
+/// }
+///
+/// foo(3)
+/// ```
+///
+/// This prints to the kernel log:
+///
+/// ```text,ignore
+/// [src/main.rs:4] n.checked_sub(4) = None
+/// ```
+///
+/// Naive factorial implementation:
+///
+/// ```rust
+/// # #[allow(clippy::dbg_macro)]
+/// # {
+/// fn factorial(n: u32) -> u32 {
+/// if dbg!(n <= 1) {
+/// dbg!(1)
+/// } else {
+/// dbg!(n * factorial(n - 1))
+/// }
+/// }
+///
+/// dbg!(factorial(4));
+/// # }
+/// ```
+///
+/// This prints to the kernel log:
+///
+/// ```text,ignore
+/// [src/main.rs:3] n <= 1 = false
+/// [src/main.rs:3] n <= 1 = false
+/// [src/main.rs:3] n <= 1 = false
+/// [src/main.rs:3] n <= 1 = true
+/// [src/main.rs:4] 1 = 1
+/// [src/main.rs:5] n * factorial(n - 1) = 2
+/// [src/main.rs:5] n * factorial(n - 1) = 6
+/// [src/main.rs:5] n * factorial(n - 1) = 24
+/// [src/main.rs:11] factorial(4) = 24
+/// ```
+///
+/// The `dbg!(..)` macro moves the input:
+///
+// TODO: Could be `compile_fail` when supported.
+/// ```ignore
+/// /// A wrapper around `usize` which importantly is not Copyable.
+/// #[derive(Debug)]
+/// struct NoCopy(usize);
+///
+/// let a = NoCopy(42);
+/// let _ = dbg!(a); // <-- `a` is moved here.
+/// let _ = dbg!(a); // <-- `a` is moved again; error!
+/// ```
+///
+/// You can also use `dbg!()` without a value to just print the
+/// file and line whenever it's reached.
+///
+/// Finally, if you want to `dbg!(..)` multiple values, it will treat them as
+/// a tuple (and return it, too):
+///
+/// ```
+/// # #[allow(clippy::dbg_macro)]
+/// assert_eq!(dbg!(1usize, 2u32), (1, 2));
+/// ```
+///
+/// However, a single argument with a trailing comma will still not be treated
+/// as a tuple, following the convention of ignoring trailing commas in macro
+/// invocations. You can use a 1-tuple directly if you need one:
+///
+/// ```
+/// # #[allow(clippy::dbg_macro)]
+/// # {
+/// assert_eq!(1, dbg!(1u32,)); // trailing comma ignored
+/// assert_eq!((1,), dbg!((1u32,))); // 1-tuple
+/// # }
+/// ```
+///
+/// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html
+/// [`eprintln`]: https://doc.rust-lang.org/std/macro.eprintln.html
+/// [`printk`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html
+#[macro_export]
+macro_rules! dbg {
+ // NOTE: We cannot use `concat!` to make a static string as a format argument
+ // of `pr_info!` because `file!` could contain a `{` or
+ // `$val` expression could be a block (`{ .. }`), in which case the `pr_info!`
+ // will be malformed.
+ () => {
+ $crate::pr_info!("[{}:{}]\n", ::core::file!(), ::core::line!())
+ };
+ ($val:expr $(,)?) => {
+ // Use of `match` here is intentional because it affects the lifetimes
+ // of temporaries - https://stackoverflow.com/a/48732525/1063961
+ match $val {
+ tmp => {
+ $crate::pr_info!("[{}:{}] {} = {:#?}\n",
+ ::core::file!(), ::core::line!(), ::core::stringify!($val), &tmp);
+ tmp
+ }
+ }
+ };
+ ($($val:expr),+ $(,)?) => {
+ ($($crate::dbg!($val)),+,)
+ };
+}
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
new file mode 100644
index 000000000000..874003e39cba
--- /dev/null
+++ b/rust/kernel/str.rs
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! String representations.
+
+use alloc::vec::Vec;
+use core::fmt::{self, Write};
+use core::ops::{self, Deref, Index};
+
+use crate::{bindings, error::code::*, Error};
+
+/// Byte string without UTF-8 validity guarantee.
+///
+/// `BStr` is simply an alias to `[u8]`, but has a more evident semantic meaning.
+pub type BStr = [u8];
+
+/// Creates a new [`BStr`] from a string literal.
+///
+/// `b_str!` converts the supplied string literal to byte string, so non-ASCII
+/// characters can be included.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::b_str;
+/// # use kernel::str::BStr;
+/// const MY_BSTR: &BStr = b_str!("My awesome BStr!");
+/// ```
+#[macro_export]
+macro_rules! b_str {
+ ($str:literal) => {{
+ const S: &'static str = $str;
+ const C: &'static $crate::str::BStr = S.as_bytes();
+ C
+ }};
+}
+
+/// Possible errors when using conversion functions in [`CStr`].
+#[derive(Debug, Clone, Copy)]
+pub enum CStrConvertError {
+ /// Supplied bytes contain an interior `NUL`.
+ InteriorNul,
+
+ /// Supplied bytes are not terminated by `NUL`.
+ NotNulTerminated,
+}
+
+impl From<CStrConvertError> for Error {
+ #[inline]
+ fn from(_: CStrConvertError) -> Error {
+ EINVAL
+ }
+}
+
+/// A string that is guaranteed to have exactly one `NUL` byte, which is at the
+/// end.
+///
+/// Used for interoperability with kernel APIs that take C strings.
+#[repr(transparent)]
+pub struct CStr([u8]);
+
+impl CStr {
+ /// Returns the length of this string excluding `NUL`.
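+ ///
+ /// # Examples
+ ///
+ /// A small sketch (not part of the original patch) using [`CStr::from_bytes_with_nul`] defined
+ /// below:
+ ///
+ /// ```
+ /// # use kernel::str::CStr;
+ /// let cstr = CStr::from_bytes_with_nul(b"abc\0").unwrap();
+ /// assert_eq!(cstr.len(), 3);
+ /// assert_eq!(cstr.len_with_nul(), 4);
+ /// assert!(!cstr.is_empty());
+ /// ```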
+ #[inline]
+ pub const fn len(&self) -> usize {
+ self.len_with_nul() - 1
+ }
+
+ /// Returns the length of this string with `NUL`.
+ #[inline]
+ pub const fn len_with_nul(&self) -> usize {
+ // SAFETY: This is one of the invariants of `CStr`.
+ // We add an `unreachable_unchecked` here to hint to the optimizer that
+ // the value returned from this function is non-zero.
+ if self.0.is_empty() {
+ unsafe { core::hint::unreachable_unchecked() };
+ }
+ self.0.len()
+ }
+
+ /// Returns `true` if the string only includes `NUL`.
+ #[inline]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Wraps a raw C string pointer.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be a valid pointer to a `NUL`-terminated C string, and it must
+ /// remain valid for at least `'a`. While the returned `CStr` is alive, the memory
+ /// pointed to by `ptr` must not be mutated.
+ #[inline]
+ pub unsafe fn from_char_ptr<'a>(ptr: *const core::ffi::c_char) -> &'a Self {
+ // SAFETY: The safety precondition guarantees `ptr` is a valid pointer
+ // to a `NUL`-terminated C string.
+ let len = unsafe { bindings::strlen(ptr) } + 1;
+ // SAFETY: Lifetime guaranteed by the safety precondition.
+ let bytes = unsafe { core::slice::from_raw_parts(ptr as _, len as _) };
+ // SAFETY: As `len` is returned by `strlen`, `bytes` does not contain interior `NUL`.
+ // As we have added 1 to `len`, the last byte is known to be `NUL`.
+ unsafe { Self::from_bytes_with_nul_unchecked(bytes) }
+ }
+
+ /// Creates a [`CStr`] from a `[u8]`.
+ ///
+ /// The provided slice must be `NUL`-terminated and must not contain any
+ /// interior `NUL` bytes.
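+ ///
+ /// # Examples
+ ///
+ /// A short sketch (not part of the original patch) of accepted and rejected inputs:
+ ///
+ /// ```
+ /// # use kernel::str::CStr;
+ /// assert!(CStr::from_bytes_with_nul(b"hello\0").is_ok());
+ /// // Missing `NUL` terminator.
+ /// assert!(CStr::from_bytes_with_nul(b"hello").is_err());
+ /// // Interior `NUL` byte.
+ /// assert!(CStr::from_bytes_with_nul(b"he\0llo\0").is_err());
+ /// ```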
+ pub const fn from_bytes_with_nul(bytes: &[u8]) -> Result<&Self, CStrConvertError> {
+ if bytes.is_empty() {
+ return Err(CStrConvertError::NotNulTerminated);
+ }
+ if bytes[bytes.len() - 1] != 0 {
+ return Err(CStrConvertError::NotNulTerminated);
+ }
+ let mut i = 0;
+ // `i + 1 < bytes.len()` allows LLVM to optimize away bounds checking,
+ // while it couldn't optimize away bounds checks for `i < bytes.len() - 1`.
+ while i + 1 < bytes.len() {
+ if bytes[i] == 0 {
+ return Err(CStrConvertError::InteriorNul);
+ }
+ i += 1;
+ }
+ // SAFETY: We just checked that all properties hold.
+ Ok(unsafe { Self::from_bytes_with_nul_unchecked(bytes) })
+ }
+
+ /// Creates a [`CStr`] from a `[u8]`, panicking if the input is not valid.
+ ///
+ /// This function is only meant to be used by `c_str!` macro, so
+ /// crates using `c_str!` macro don't have to enable `const_panic` feature.
+ #[doc(hidden)]
+ pub const fn from_bytes_with_nul_unwrap(bytes: &[u8]) -> &Self {
+ match Self::from_bytes_with_nul(bytes) {
+ Ok(v) => v,
+ Err(_) => panic!("string contains interior NUL"),
+ }
+ }
+
+ /// Creates a [`CStr`] from a `[u8]` without performing any additional
+ /// checks.
+ ///
+ /// # Safety
+ ///
+ /// `bytes` *must* end with a `NUL` byte, and should only have a single
+ /// `NUL` byte (or the string will be truncated).
+ #[inline]
+ pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
+ // SAFETY: Properties of `bytes` guaranteed by the safety precondition.
+ unsafe { core::mem::transmute(bytes) }
+ }
+
+ /// Returns a C pointer to the string.
+ #[inline]
+ pub const fn as_char_ptr(&self) -> *const core::ffi::c_char {
+ self.0.as_ptr() as _
+ }
+
+ /// Convert the string to a byte slice without the trailing 0 byte.
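+ ///
+ /// # Examples
+ ///
+ /// A small sketch (not part of the original patch):
+ ///
+ /// ```
+ /// # use kernel::str::CStr;
+ /// let cstr = CStr::from_bytes_with_nul(b"foo\0").unwrap();
+ /// assert_eq!(cstr.as_bytes(), "foo".as_bytes());
+ /// assert_eq!(cstr.as_bytes_with_nul(), "foo\0".as_bytes());
+ /// ```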
+ #[inline]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.0[..self.len()]
+ }
+
+ /// Convert the string to a byte slice containing the trailing 0 byte.
+ #[inline]
+ pub const fn as_bytes_with_nul(&self) -> &[u8] {
+ &self.0
+ }
+
+ /// Yields a [`&str`] slice if the [`CStr`] contains valid UTF-8.
+ ///
+ /// If the contents of the [`CStr`] are valid UTF-8 data, this
+ /// function will return the corresponding [`&str`] slice. Otherwise,
+ /// it will return an error with details of where UTF-8 validation failed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::str::CStr;
+ /// let cstr = CStr::from_bytes_with_nul(b"foo\0").unwrap();
+ /// assert_eq!(cstr.to_str(), Ok("foo"));
+ /// ```
+ #[inline]
+ pub fn to_str(&self) -> Result<&str, core::str::Utf8Error> {
+ core::str::from_utf8(self.as_bytes())
+ }
+
+ /// Unsafely convert this [`CStr`] into a [`&str`], without checking for
+ /// valid UTF-8.
+ ///
+ /// # Safety
+ ///
+ /// The contents must be valid UTF-8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::str::CStr;
+ /// // SAFETY: String literals are guaranteed to be valid UTF-8
+ /// // by the Rust compiler.
+ /// let bar = c_str!("ツ");
+ /// assert_eq!(unsafe { bar.as_str_unchecked() }, "ツ");
+ /// ```
+ #[inline]
+ pub unsafe fn as_str_unchecked(&self) -> &str {
+ unsafe { core::str::from_utf8_unchecked(self.as_bytes()) }
+ }
+}
+
+impl fmt::Display for CStr {
+ /// Formats printable ASCII characters, escaping the rest.
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::str::CStr;
+ /// # use kernel::str::CString;
+ /// let penguin = c_str!("🐧");
+ /// let s = CString::try_from_fmt(fmt!("{}", penguin)).unwrap();
+ /// assert_eq!(s.as_bytes_with_nul(), "\\xf0\\x9f\\x90\\xa7\0".as_bytes());
+ ///
+ /// let ascii = c_str!("so \"cool\"");
+ /// let s = CString::try_from_fmt(fmt!("{}", ascii)).unwrap();
+ /// assert_eq!(s.as_bytes_with_nul(), "so \"cool\"\0".as_bytes());
+ /// ```
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ for &c in self.as_bytes() {
+ if (0x20..0x7f).contains(&c) {
+ // Printable character.
+ f.write_char(c as char)?;
+ } else {
+ write!(f, "\\x{:02x}", c)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl fmt::Debug for CStr {
+ /// Formats printable ASCII characters with a double quote on either end, escaping the rest.
+ ///
+ /// ```
+ /// # use kernel::c_str;
+ /// # use kernel::str::CStr;
+ /// # use kernel::str::CString;
+ /// let penguin = c_str!("🐧");
+ /// let s = CString::try_from_fmt(fmt!("{:?}", penguin)).unwrap();
+ /// assert_eq!(s.as_bytes_with_nul(), "\"\\xf0\\x9f\\x90\\xa7\"\0".as_bytes());
+ ///
+ /// // Embedded double quotes are escaped.
+ /// let ascii = c_str!("so \"cool\"");
+ /// let s = CString::try_from_fmt(fmt!("{:?}", ascii)).unwrap();
+ /// assert_eq!(s.as_bytes_with_nul(), "\"so \\\"cool\\\"\"\0".as_bytes());
+ /// ```
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("\"")?;
+ for &c in self.as_bytes() {
+ match c {
+ // Printable characters.
+ b'\"' => f.write_str("\\\"")?,
+ 0x20..=0x7e => f.write_char(c as char)?,
+ _ => write!(f, "\\x{:02x}", c)?,
+ }
+ }
+ f.write_str("\"")
+ }
+}
+
+impl AsRef<BStr> for CStr {
+ #[inline]
+ fn as_ref(&self) -> &BStr {
+ self.as_bytes()
+ }
+}
+
+impl Deref for CStr {
+ type Target = BStr;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.as_bytes()
+ }
+}
+
+impl Index<ops::RangeFrom<usize>> for CStr {
+ type Output = CStr;
+
+ #[inline]
+ fn index(&self, index: ops::RangeFrom<usize>) -> &Self::Output {
+ // Delegate bounds checking to slice.
+ // Assign to _ to mute clippy's unnecessary operation warning.
+ let _ = &self.as_bytes()[index.start..];
+ // SAFETY: We just checked the bounds.
+ unsafe { Self::from_bytes_with_nul_unchecked(&self.0[index.start..]) }
+ }
+}
+
+impl Index<ops::RangeFull> for CStr {
+ type Output = CStr;
+
+ #[inline]
+ fn index(&self, _index: ops::RangeFull) -> &Self::Output {
+ self
+ }
+}
+
+mod private {
+ use core::ops;
+
+ // Marker trait for index types that can be forwarded to `BStr`.
+ pub trait CStrIndex {}
+
+ impl CStrIndex for usize {}
+ impl CStrIndex for ops::Range<usize> {}
+ impl CStrIndex for ops::RangeInclusive<usize> {}
+ impl CStrIndex for ops::RangeToInclusive<usize> {}
+}
+
+impl<Idx> Index<Idx> for CStr
+where
+ Idx: private::CStrIndex,
+ BStr: Index<Idx>,
+{
+ type Output = <BStr as Index<Idx>>::Output;
+
+ #[inline]
+ fn index(&self, index: Idx) -> &Self::Output {
+ &self.as_bytes()[index]
+ }
+}
+
+/// Creates a new [`CStr`] from a string literal.
+///
+/// The string literal should not contain any `NUL` bytes.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::c_str;
+/// # use kernel::str::CStr;
+/// const MY_CSTR: &CStr = c_str!("My awesome CStr!");
+/// ```
+#[macro_export]
+macro_rules! c_str {
+ ($str:expr) => {{
+ const S: &str = concat!($str, "\0");
+ const C: &$crate::str::CStr = $crate::str::CStr::from_bytes_with_nul_unwrap(S.as_bytes());
+ C
+ }};
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_cstr_to_str() {
+ let good_bytes = b"\xf0\x9f\xa6\x80\0";
+ let checked_cstr = CStr::from_bytes_with_nul(good_bytes).unwrap();
+ let checked_str = checked_cstr.to_str().unwrap();
+ assert_eq!(checked_str, "🦀");
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_cstr_to_str_panic() {
+ let bad_bytes = b"\xc3\x28\0";
+ let checked_cstr = CStr::from_bytes_with_nul(bad_bytes).unwrap();
+ checked_cstr.to_str().unwrap();
+ }
+
+ #[test]
+ fn test_cstr_as_str_unchecked() {
+ let good_bytes = b"\xf0\x9f\x90\xA7\0";
+ let checked_cstr = CStr::from_bytes_with_nul(good_bytes).unwrap();
+ let unchecked_str = unsafe { checked_cstr.as_str_unchecked() };
+ assert_eq!(unchecked_str, "🐧");
+ }
+}
+
+/// Allows formatting of [`fmt::Arguments`] into a raw buffer.
+///
+/// It does not fail if callers write past the end of the buffer so that they can calculate the
+/// size required to fit everything.
+///
+/// # Invariants
+///
+/// The memory region between `pos` (inclusive) and `end` (exclusive) is valid for writes if `pos`
+/// is less than `end`.
+pub(crate) struct RawFormatter {
+ // Use `usize` to use `saturating_*` functions.
+ beg: usize,
+ pos: usize,
+ end: usize,
+}
+
+impl RawFormatter {
+ /// Creates a new instance of [`RawFormatter`] with an empty buffer.
+ fn new() -> Self {
+ // INVARIANT: The buffer is empty, so the region that needs to be writable is empty.
+ Self {
+ beg: 0,
+ pos: 0,
+ end: 0,
+ }
+ }
+
+ /// Creates a new instance of [`RawFormatter`] with the given buffer pointers.
+ ///
+ /// # Safety
+ ///
+ /// If `pos` is less than `end`, then the region between `pos` (inclusive) and `end`
+ /// (exclusive) must be valid for writes for the lifetime of the returned [`RawFormatter`].
+ pub(crate) unsafe fn from_ptrs(pos: *mut u8, end: *mut u8) -> Self {
+ // INVARIANT: The safety requirements guarantee the type invariants.
+ Self {
+ beg: pos as _,
+ pos: pos as _,
+ end: end as _,
+ }
+ }
+
+ /// Creates a new instance of [`RawFormatter`] with the given buffer.
+ ///
+ /// # Safety
+ ///
+ /// The memory region starting at `buf` and extending for `len` bytes must be valid for writes
+ /// for the lifetime of the returned [`RawFormatter`].
+ pub(crate) unsafe fn from_buffer(buf: *mut u8, len: usize) -> Self {
+ let pos = buf as usize;
+ // INVARIANT: We ensure that `end` is never less than `buf`, and the safety requirements
+ // guarantee that the memory region is valid for writes.
+ Self {
+ pos,
+ beg: pos,
+ end: pos.saturating_add(len),
+ }
+ }
+
+ /// Returns the current insert position.
+ ///
+ /// N.B. It may point to invalid memory.
+ pub(crate) fn pos(&self) -> *mut u8 {
+ self.pos as _
+ }
+
+ /// Returns the number of bytes written to the formatter.
+ pub(crate) fn bytes_written(&self) -> usize {
+ self.pos - self.beg
+ }
+}
+
+impl fmt::Write for RawFormatter {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ // `pos` value after writing `len` bytes. This does not have to be bounded by `end`, but we
+ // don't want it to wrap around to 0.
+ let pos_new = self.pos.saturating_add(s.len());
+
+ // Amount that we can copy. `saturating_sub` ensures we get 0 if `pos` goes past `end`.
+ let len_to_copy = core::cmp::min(pos_new, self.end).saturating_sub(self.pos);
+
+ if len_to_copy > 0 {
+ // SAFETY: If `len_to_copy` is non-zero, then we know `pos` has not gone past `end`
+ // yet, so it is valid for write per the type invariants.
+ unsafe {
+ core::ptr::copy_nonoverlapping(
+ s.as_bytes().as_ptr(),
+ self.pos as *mut u8,
+ len_to_copy,
+ )
+ };
+ }
+
+ self.pos = pos_new;
+ Ok(())
+ }
+}
+
+/// Allows formatting of [`fmt::Arguments`] into a raw buffer.
+///
+/// Fails if callers attempt to write more than will fit in the buffer.
+pub(crate) struct Formatter(RawFormatter);
+
+impl Formatter {
+ /// Creates a new instance of [`Formatter`] with the given buffer.
+ ///
+ /// # Safety
+ ///
+ /// The memory region starting at `buf` and extending for `len` bytes must be valid for writes
+ /// for the lifetime of the returned [`Formatter`].
+ pub(crate) unsafe fn from_buffer(buf: *mut u8, len: usize) -> Self {
+ // SAFETY: The safety requirements of this function satisfy those of the callee.
+ Self(unsafe { RawFormatter::from_buffer(buf, len) })
+ }
+}
+
+impl Deref for Formatter {
+ type Target = RawFormatter;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl fmt::Write for Formatter {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ self.0.write_str(s)?;
+
+ // Fail the request if we go past the end of the buffer.
+ if self.0.pos > self.0.end {
+ Err(fmt::Error)
+ } else {
+ Ok(())
+ }
+ }
+}
+
+/// An owned string that is guaranteed to have exactly one `NUL` byte, which is at the end.
+///
+/// Used for interoperability with kernel APIs that take C strings.
+///
+/// # Invariants
+///
+/// The string is always `NUL`-terminated and contains no other `NUL` bytes.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::str::CString;
+///
+/// let s = CString::try_from_fmt(fmt!("{}{}{}", "abc", 10, 20)).unwrap();
+/// assert_eq!(s.as_bytes_with_nul(), "abc1020\0".as_bytes());
+///
+/// let tmp = "testing";
+/// let s = CString::try_from_fmt(fmt!("{tmp}{}", 123)).unwrap();
+/// assert_eq!(s.as_bytes_with_nul(), "testing123\0".as_bytes());
+///
+/// // This fails because it has an embedded `NUL` byte.
+/// let s = CString::try_from_fmt(fmt!("a\0b{}", 123));
+/// assert_eq!(s.is_ok(), false);
+/// ```
+pub struct CString {
+ buf: Vec<u8>,
+}
+
+impl CString {
+ /// Creates an instance of [`CString`] from the given formatted arguments.
+ pub fn try_from_fmt(args: fmt::Arguments<'_>) -> Result<Self, Error> {
+ // Calculate the size needed (formatted string plus `NUL` terminator).
+ let mut f = RawFormatter::new();
+ f.write_fmt(args)?;
+ f.write_str("\0")?;
+ let size = f.bytes_written();
+
+ // Allocate a vector with the required number of bytes, and write to it.
+ let mut buf = Vec::try_with_capacity(size)?;
+ // SAFETY: The buffer stored in `buf` is at least of size `size` and is valid for writes.
+ let mut f = unsafe { Formatter::from_buffer(buf.as_mut_ptr(), size) };
+ f.write_fmt(args)?;
+ f.write_str("\0")?;
+
+ // SAFETY: The number of bytes that can be written to `f` is bounded by `size`, which is
+ // `buf`'s capacity. The contents of the buffer have been initialised by writes to `f`.
+ unsafe { buf.set_len(f.bytes_written()) };
+
+ // Check that there are no `NUL` bytes before the end.
+ // SAFETY: The buffer is valid for read because `f.bytes_written()` is bounded by `size`
+ // (which is the minimum buffer size) and is non-zero (we wrote at least the `NUL`
+ // terminator), so `f.bytes_written() - 1` doesn't underflow.
+ let ptr = unsafe { bindings::memchr(buf.as_ptr().cast(), 0, (f.bytes_written() - 1) as _) };
+ if !ptr.is_null() {
+ return Err(EINVAL);
+ }
+
+ // INVARIANT: We wrote the `NUL` terminator and checked above that no other `NUL` bytes
+ // exist in the buffer.
+ Ok(Self { buf })
+ }
+}
+
+impl Deref for CString {
+ type Target = CStr;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariants guarantee that the string is `NUL`-terminated and that no
+ // other `NUL` bytes exist.
+ unsafe { CStr::from_bytes_with_nul_unchecked(self.buf.as_slice()) }
+ }
+}
+
+/// A convenience alias for [`core::format_args`].
+#[macro_export]
+macro_rules! fmt {
+ ($($f:tt)*) => ( core::format_args!($($f)*) )
+}
diff --git a/rust/kernel/sync.rs b/rust/kernel/sync.rs
new file mode 100644
index 000000000000..255a4928a47a
--- /dev/null
+++ b/rust/kernel/sync.rs
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Synchronisation primitives.
+//!
+//! This module contains the kernel APIs related to synchronisation that have been ported or
+//! wrapped for usage by Rust code in the kernel and is shared by all of them.
+//!
+//! # Examples
+//!
+//! ```
+//! # use kernel::mutex_init;
+//! # use kernel::sync::Mutex;
+//! # use alloc::boxed::Box;
+//! # use core::pin::Pin;
+//! // SAFETY: `init` is called below.
+//! let mut data = Pin::from(Box::try_new(unsafe { Mutex::new(10) }).unwrap());
+//! mutex_init!(data.as_mut(), "test::data");
+//!
+//! assert_eq!(*data.lock(), 10);
+//! *data.lock() = 20;
+//! assert_eq!(*data.lock(), 20);
+//! ```
+
+use crate::{bindings, str::CStr};
+use core::{cell::UnsafeCell, mem::MaybeUninit, pin::Pin};
+
+mod arc;
+mod condvar;
+mod guard;
+mod locked_by;
+mod mutex;
+mod nowait;
+pub mod rcu;
+mod revocable;
+mod rwsem;
+mod seqlock;
+pub mod smutex;
+mod spinlock;
+
+pub use arc::{new_refcount, Ref, RefBorrow, StaticRef, UniqueRef};
+pub use condvar::CondVar;
+pub use guard::{Guard, Lock, LockFactory, LockInfo, LockIniter, ReadLock, WriteLock};
+pub use locked_by::LockedBy;
+pub use mutex::{Mutex, RevocableMutex, RevocableMutexGuard};
+pub use nowait::{NoWaitLock, NoWaitLockGuard};
+pub use revocable::{Revocable, RevocableGuard};
+pub use rwsem::{RevocableRwSemaphore, RevocableRwSemaphoreGuard, RwSemaphore};
+pub use seqlock::{SeqLock, SeqLockReadGuard};
+pub use spinlock::{RawSpinLock, SpinLock};
+
+/// Represents a lockdep class. It's a wrapper around C's `lock_class_key`.
+#[repr(transparent)]
+pub struct LockClassKey(UnsafeCell<MaybeUninit<bindings::lock_class_key>>);
+
+// SAFETY: This is a wrapper around a lock class key, so it is safe to use references to it from
+// any thread.
+unsafe impl Sync for LockClassKey {}
+
+impl LockClassKey {
+ /// Creates a new lock class key.
+ pub const fn new() -> Self {
+ Self(UnsafeCell::new(MaybeUninit::uninit()))
+ }
+
+ pub(crate) fn get(&self) -> *mut bindings::lock_class_key {
+ self.0.get().cast()
+ }
+}
+
+/// Safely initialises an object that has an `init` function that takes a name and a lock class as
+/// arguments; examples of these are [`Mutex`] and [`SpinLock`]. Each of them also provides a more
+/// specialised name that uses this macro.
+#[doc(hidden)]
+#[macro_export]
+macro_rules! init_with_lockdep {
+ ($obj:expr, $name:expr) => {{
+ static CLASS1: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ static CLASS2: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ let obj = $obj;
+ let name = $crate::c_str!($name);
+ $crate::sync::NeedsLockClass::init(obj, name, &CLASS1, &CLASS2)
+ }};
+}
+
+/// A trait for types that need a lock class during initialisation.
+///
+/// Implementers of this trait benefit from the [`init_with_lockdep`] macro that generates a new
+/// class for each initialisation call site.
+pub trait NeedsLockClass {
+ /// Initialises the type instance so that it can be safely used.
+ ///
+ /// Callers are encouraged to use the [`init_with_lockdep`] macro as it automatically creates a
+ /// new lock class on each usage.
+ fn init(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key1: &'static LockClassKey,
+ key2: &'static LockClassKey,
+ );
+}
+
+/// Automatically initialises static instances of synchronisation primitives.
+///
+/// The syntax resembles that of regular static variables, except that the value assigned is that
+/// of the protected type (if one exists). In the examples below, all primitives except for
+/// [`CondVar`] require the inner value to be supplied.
+///
+/// # Examples
+///
+/// ```ignore
+/// # use kernel::{init_static_sync, sync::{CondVar, Mutex, RevocableMutex, SpinLock}};
+/// struct Test {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// init_static_sync! {
+/// static A: Mutex<Test> = Test { a: 10, b: 20 };
+///
+/// /// Documentation for `B`.
+/// pub static B: Mutex<u32> = 0;
+///
+/// pub(crate) static C: SpinLock<Test> = Test { a: 10, b: 20 };
+/// static D: CondVar;
+///
+/// static E: RevocableMutex<Test> = Test { a: 30, b: 40 };
+/// }
+/// ```
+#[macro_export]
+macro_rules! init_static_sync {
+ ($($(#[$outer:meta])* $v:vis static $id:ident : $t:ty $(= $value:expr)?;)*) => {
+ $(
+ $(#[$outer])*
+ $v static $id: $t = {
+ #[link_section = ".init_array"]
+ #[used]
+ static TMP: extern "C" fn() = {
+ extern "C" fn constructor() {
+ // SAFETY: This locally-defined function is only called from a constructor,
+ // which guarantees that `$id` is not accessible from other threads
+ // concurrently.
+ #[allow(clippy::cast_ref_to_mut)]
+ let mutable = unsafe { &mut *(&$id as *const _ as *mut $t) };
+ // SAFETY: It's a shared static, so it cannot move.
+ let pinned = unsafe { core::pin::Pin::new_unchecked(mutable) };
+ $crate::init_with_lockdep!(pinned, stringify!($id));
+ }
+ constructor
+ };
+ $crate::init_static_sync!(@call_new $t, $($value)?)
+ };
+ )*
+ };
+ (@call_new $t:ty, $value:expr) => {{
+ let v = $value;
+ // SAFETY: the initialisation function is called by the constructor above.
+ unsafe { <$t>::new(v) }
+ }};
+ (@call_new $t:ty,) => {
+ // SAFETY: the initialisation function is called by the constructor above.
+ unsafe { <$t>::new() }
+ };
+}
+
+/// Reschedules the caller's task if needed.
+pub fn cond_resched() -> bool {
+ // SAFETY: No arguments, reschedules `current` if needed.
+ unsafe { bindings::cond_resched() != 0 }
+}
diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs
new file mode 100644
index 000000000000..daf4aba08672
--- /dev/null
+++ b/rust/kernel/sync/arc.rs
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A reference-counted pointer.
+//!
+//! This module implements a way for users to create reference-counted objects and pointers to
+//! them. Such a pointer automatically increments and decrements the count, and drops the
+//! underlying object when it reaches zero. It is also safe to use concurrently from multiple
+//! threads.
+//!
+//! It is different from the standard library's [`Arc`] in a few ways:
+//! 1. It is backed by the kernel's `refcount_t` type.
+//! 2. It does not support weak references, which allows it to be half the size.
+//! 3. It saturates the reference count instead of aborting when it goes over a threshold.
+//! 4. It does not provide a `get_mut` method, so the ref counted object is pinned.
+//!
+//! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html
+
+use crate::{bindings, error::code::*, Error, Opaque, Result};
+use alloc::{
+ alloc::{alloc, dealloc},
+ vec::Vec,
+};
+use core::{
+ alloc::Layout,
+ convert::{AsRef, TryFrom},
+ marker::{PhantomData, Unsize},
+ mem::{ManuallyDrop, MaybeUninit},
+ ops::{Deref, DerefMut},
+ pin::Pin,
+ ptr::{self, NonNull},
+};
+
+/// A reference-counted pointer to an instance of `T`.
+///
+/// The reference count is incremented when new instances of [`Ref`] are created, and decremented
+/// when they are dropped. When the count reaches zero, the underlying `T` is also dropped.
+///
+/// # Invariants
+///
+/// The reference count on an instance of [`Ref`] is always non-zero.
+/// The object pointed to by [`Ref`] is always pinned.
+pub struct Ref<T: ?Sized> {
+ ptr: NonNull<RefInner<T>>,
+ _p: PhantomData<RefInner<T>>,
+}
+
+#[repr(C)]
+struct RefInner<T: ?Sized> {
+ refcount: Opaque<bindings::refcount_t>,
+ data: T,
+}
+
+// This is to allow [`Ref`] (and variants) to be used as the type of `self`.
+impl<T: ?Sized> core::ops::Receiver for Ref<T> {}
+
+// This is to allow [`RefBorrow`] (and variants) to be used as the type of `self`.
+impl<T: ?Sized> core::ops::Receiver for RefBorrow<'_, T> {}
+
+// This is to allow coercion from `Ref<T>` to `Ref<U>` if `T` can be converted to the
+// dynamically-sized type (DST) `U`.
+impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::CoerceUnsized<Ref<U>> for Ref<T> {}
+
+// This is to allow `Ref<U>` to be dispatched on when `Ref<T>` can be coerced into `Ref<U>`.
+impl<T: ?Sized + Unsize<U>, U: ?Sized> core::ops::DispatchFromDyn<Ref<U>> for Ref<T> {}
+
+// SAFETY: It is safe to send `Ref<T>` to another thread when the underlying `T` is `Sync` because
+// it effectively means sharing `&T` (which is safe because `T` is `Sync`); additionally, it needs
+// `T` to be `Send` because any thread that has a `Ref<T>` may ultimately access `T` directly, for
+// example, when the reference count reaches zero and `T` is dropped.
+unsafe impl<T: ?Sized + Sync + Send> Send for Ref<T> {}
+
+// SAFETY: It is safe to send `&Ref<T>` to another thread when the underlying `T` is `Sync` for
+// the same reason as above. `T` needs to be `Send` as well because a thread can clone a `&Ref<T>`
+// into a `Ref<T>`, which may lead to `T` being accessed by the same reasoning as above.
+unsafe impl<T: ?Sized + Sync + Send> Sync for Ref<T> {}
+
+impl<T> Ref<T> {
+ /// Constructs a new reference counted instance of `T`.
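+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch (not part of the original patch), assuming `Ref` can be cloned and
+ /// dereferenced like the standard library's `Arc`:
+ ///
+ /// ```ignore
+ /// use kernel::sync::Ref;
+ ///
+ /// let r = Ref::try_new(42u32)?;
+ /// let cloned = r.clone();
+ /// assert_eq!(*r, 42);
+ /// assert_eq!(*cloned, 42);
+ /// ```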
+ pub fn try_new(contents: T) -> Result<Self> {
+ let layout = Layout::new::<RefInner<T>>();
+ // SAFETY: The layout size is guaranteed to be non-zero because `RefInner` contains the
+ // reference count.
+ let inner = NonNull::new(unsafe { alloc(layout) })
+ .ok_or(ENOMEM)?
+ .cast::<RefInner<T>>();
+
+ // INVARIANT: The refcount is initialised to a non-zero value.
+ let value = RefInner {
+ refcount: Opaque::new(new_refcount()),
+ data: contents,
+ };
+ // SAFETY: `inner` is writable and properly aligned.
+ unsafe { inner.as_ptr().write(value) };
+
+ // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new
+ // `Ref` object.
+ Ok(unsafe { Self::from_inner(inner) })
+ }
+
+ /// Deconstructs a [`Ref`] object into a `usize`.
+ ///
+ /// It can be reconstructed once via [`Ref::from_usize`].
+ pub fn into_usize(obj: Self) -> usize {
+ ManuallyDrop::new(obj).ptr.as_ptr() as _
+ }
+
+ /// Borrows a [`Ref`] instance previously deconstructed via [`Ref::into_usize`].
+ ///
+ /// # Safety
+ ///
+ /// `encoded` must have been returned by a previous call to [`Ref::into_usize`]. Additionally,
+ /// [`Ref::from_usize`] can only be called after *all* instances of [`RefBorrow`] have been
+ /// dropped.
+ pub unsafe fn borrow_usize<'a>(encoded: usize) -> RefBorrow<'a, T> {
+ // SAFETY: By the safety requirement of this function, we know that `encoded` came from
+ // a previous call to `Ref::into_usize`.
+ let inner = NonNull::new(encoded as *mut RefInner<T>).unwrap();
+
+ // SAFETY: The safety requirements ensure that the object remains alive for the lifetime of
+ // the returned value. There is no way to create mutable references to the object.
+ unsafe { RefBorrow::new(inner) }
+ }
+
+ /// Recreates a [`Ref`] instance previously deconstructed via [`Ref::into_usize`].
+ ///
+ /// # Safety
+ ///
+ /// `encoded` must have been returned by a previous call to [`Ref::into_usize`]. Additionally,
+ /// it can only be called once for each previous call to [`Ref::into_usize`].
+ pub unsafe fn from_usize(encoded: usize) -> Self {
+ // SAFETY: By the safety invariants we know that `encoded` came from `Ref::into_usize`, so
+ // the reference count held then will be owned by the new `Ref` object.
+ unsafe { Self::from_inner(NonNull::new(encoded as _).unwrap()) }
+ }
+}
+
+impl<T: ?Sized> Ref<T> {
+ /// Constructs a new [`Ref`] from an existing [`RefInner`].
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that `inner` points to a valid location and has a non-zero reference
+ /// count, one of which will be owned by the new [`Ref`] instance.
+ unsafe fn from_inner(inner: NonNull<RefInner<T>>) -> Self {
+ // INVARIANT: By the safety requirements, the invariants hold.
+ Ref {
+ ptr: inner,
+ _p: PhantomData,
+ }
+ }
+
+ /// Determines if two reference-counted pointers point to the same underlying instance of `T`.
+ pub fn ptr_eq(a: &Self, b: &Self) -> bool {
+ ptr::eq(a.ptr.as_ptr(), b.ptr.as_ptr())
+ }
+
+ /// Deconstructs a [`Ref`] object into a raw pointer.
+ ///
+ /// It can be reconstructed once via [`Ref::from_raw`].
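+    ///
+    /// # Examples
+    ///
+    /// A hypothetical round trip through a raw pointer (illustrative sketch only):
+    ///
+    /// ```
+    /// use kernel::sync::Ref;
+    ///
+    /// fn round_trip() -> Result<()> {
+    ///     let r = Ref::try_new(5u32)?;
+    ///     // The reference owned by `r` is transferred to the raw pointer.
+    ///     let raw = Ref::into_raw(r);
+    ///     // SAFETY: `raw` came from `into_raw` above and is converted back exactly once.
+    ///     let r = unsafe { Ref::from_raw(raw) };
+    ///     assert_eq!(*r, 5);
+    ///     Ok(())
+    /// }
+    ///
+    /// # round_trip();
+    /// ```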
+ pub fn into_raw(obj: Self) -> *const T {
+ let ret = &*obj as *const T;
+ core::mem::forget(obj);
+ ret
+ }
+
+ /// Recreates a [`Ref`] instance previously deconstructed via [`Ref::into_raw`].
+ ///
+ /// This code relies on the `repr(C)` layout of structs as described in
+ /// <https://doc.rust-lang.org/reference/type-layout.html#reprc-structs>.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`Ref::into_raw`]. Additionally, it
+ /// can only be called once for each previous call to [`Ref::into_raw`].
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // SAFETY: The safety requirement ensures that the pointer is valid.
+ let align = core::mem::align_of_val(unsafe { &*ptr });
+ let offset = Layout::new::<RefInner<()>>()
+ .align_to(align)
+ .unwrap()
+ .pad_to_align()
+ .size();
+ // SAFETY: The pointer is in bounds because by the safety requirements `ptr` came from
+ // `Ref::into_raw`, so it is a pointer `offset` bytes from the beginning of the allocation.
+ let data = unsafe { (ptr as *const u8).sub(offset) };
+ let metadata = ptr::metadata(ptr as *const RefInner<T>);
+ let ptr = ptr::from_raw_parts_mut(data as _, metadata);
+ // SAFETY: By the safety requirements we know that `ptr` came from `Ref::into_raw`, so the
+ // reference count held then will be owned by the new `Ref` object.
+ unsafe { Self::from_inner(NonNull::new(ptr).unwrap()) }
+ }
+
+ /// Returns a [`RefBorrow`] from the given [`Ref`].
+ ///
+ /// This is useful when the argument of a function call is a [`RefBorrow`] (e.g., in a method
+ /// receiver), but we have a [`Ref`] instead. Getting a [`RefBorrow`] is free when optimised.
+ #[inline]
+ pub fn as_ref_borrow(&self) -> RefBorrow<'_, T> {
+ // SAFETY: The constraint that lifetime of the shared reference must outlive that of
+ // the returned `RefBorrow` ensures that the object remains alive.
+ unsafe { RefBorrow::new(self.ptr) }
+ }
+}
+
+impl<T: ?Sized> Deref for Ref<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to dereference it.
+ unsafe { &self.ptr.as_ref().data }
+ }
+}
+
+impl<T: ?Sized> Clone for Ref<T> {
+ fn clone(&self) -> Self {
+ // INVARIANT: C `refcount_inc` saturates the refcount, so it cannot overflow to zero.
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to increment the refcount.
+ unsafe { bindings::refcount_inc(self.ptr.as_ref().refcount.get()) };
+
+ // SAFETY: We just incremented the refcount. This increment is now owned by the new `Ref`.
+ unsafe { Self::from_inner(self.ptr) }
+ }
+}
+
+impl<T: ?Sized> AsRef<T> for Ref<T> {
+ fn as_ref(&self) -> &T {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object, so it is
+ // safe to dereference it.
+ unsafe { &self.ptr.as_ref().data }
+ }
+}
+
+impl<T: ?Sized> Drop for Ref<T> {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariant, there is necessarily a reference to the object. We cannot
+ // touch `refcount` after it's decremented to a non-zero value because another thread/CPU
+ // may concurrently decrement it to zero and free it. It is ok to have a raw pointer to
+ // freed/invalid memory as long as it is never dereferenced.
+ let refcount = unsafe { self.ptr.as_ref() }.refcount.get();
+
+ // INVARIANT: If the refcount reaches zero, there are no other instances of `Ref`, and
+ // this instance is being dropped, so the broken invariant is not observable.
+ // SAFETY: Also by the type invariant, we are allowed to decrement the refcount.
+ let is_zero = unsafe { bindings::refcount_dec_and_test(refcount) };
+ if is_zero {
+ // The count reached zero, we must free the memory.
+
+ // SAFETY: This thread holds the only remaining reference to `self`, so it is safe to
+ // get a mutable reference to it.
+ let inner = unsafe { self.ptr.as_mut() };
+ let layout = Layout::for_value(inner);
+ // SAFETY: The value stored in inner is valid.
+ unsafe { core::ptr::drop_in_place(inner) };
+ // SAFETY: The pointer was initialised from the result of a call to `alloc`.
+ unsafe { dealloc(self.ptr.cast().as_ptr(), layout) };
+ }
+ }
+}
+
+impl<T> TryFrom<Vec<T>> for Ref<[T]> {
+ type Error = Error;
+
+ fn try_from(mut v: Vec<T>) -> Result<Self> {
+ let value_layout = Layout::array::<T>(v.len())?;
+ let layout = Layout::new::<RefInner<()>>()
+ .extend(value_layout)?
+ .0
+ .pad_to_align();
+ // SAFETY: The layout size is guaranteed to be non-zero because `RefInner` contains the
+ // reference count.
+ let ptr = NonNull::new(unsafe { alloc(layout) }).ok_or(ENOMEM)?;
+ let inner =
+ core::ptr::slice_from_raw_parts_mut(ptr.as_ptr() as _, v.len()) as *mut RefInner<[T]>;
+
+ // SAFETY: Just an FFI call that returns a `refcount_t` initialised to 1.
+ let count = Opaque::new(unsafe { bindings::REFCOUNT_INIT(1) });
+ // SAFETY: `inner.refcount` is writable and properly aligned.
+ unsafe { core::ptr::addr_of_mut!((*inner).refcount).write(count) };
+        // SAFETY: The contents of `v` are readable and properly aligned; `inner.data` is writable
+ // and properly aligned. There is no overlap between the two because `inner` is a new
+ // allocation.
+ unsafe {
+ core::ptr::copy_nonoverlapping(
+ v.as_ptr(),
+ core::ptr::addr_of_mut!((*inner).data) as *mut [T] as *mut T,
+ v.len(),
+ )
+ };
+        // SAFETY: We're setting the new length to zero, so it is less than or equal to the
+        // capacity, and `old_len..0` is an empty range (so the requirement that those elements be
+        // initialised is satisfied vacuously).
+ unsafe { v.set_len(0) };
+ // SAFETY: We just created `inner` with a reference count of 1, which is owned by the new
+ // `Ref` object.
+ Ok(unsafe { Self::from_inner(NonNull::new(inner).unwrap()) })
+ }
+}
+
+impl<T: ?Sized> From<UniqueRef<T>> for Ref<T> {
+ fn from(item: UniqueRef<T>) -> Self {
+ item.inner
+ }
+}
+
+impl<T: ?Sized> From<UniqueRef<T>> for Pin<UniqueRef<T>> {
+ fn from(obj: UniqueRef<T>) -> Self {
+ // SAFETY: It is not possible to move/replace `T` inside a `Pin<UniqueRef<T>>` (unless `T`
+ // is `Unpin`), so it is ok to convert it to `Pin<UniqueRef<T>>`.
+ unsafe { Pin::new_unchecked(obj) }
+ }
+}
+
+impl<T: ?Sized> From<Pin<UniqueRef<T>>> for Ref<T> {
+ fn from(item: Pin<UniqueRef<T>>) -> Self {
+ // SAFETY: The type invariants of `Ref` guarantee that the data is pinned.
+ unsafe { Pin::into_inner_unchecked(item).inner }
+ }
+}
+
+/// A borrowed [`Ref`] with manually-managed lifetime.
+///
+/// # Invariants
+///
+/// There are no mutable references to the underlying [`Ref`], and it remains valid for the lifetime
+/// of the [`RefBorrow`] instance.
+pub struct RefBorrow<'a, T: ?Sized + 'a> {
+ inner: NonNull<RefInner<T>>,
+ _p: PhantomData<&'a ()>,
+}
+
+impl<T: ?Sized> Clone for RefBorrow<'_, T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T: ?Sized> Copy for RefBorrow<'_, T> {}
+
+impl<T: ?Sized> RefBorrow<'_, T> {
+ /// Creates a new [`RefBorrow`] instance.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure the following for the lifetime of the returned [`RefBorrow`] instance:
+ /// 1. That `obj` remains valid;
+ /// 2. That no mutable references to `obj` are created.
+ unsafe fn new(inner: NonNull<RefInner<T>>) -> Self {
+ // INVARIANT: The safety requirements guarantee the invariants.
+ Self {
+ inner,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T: ?Sized> From<RefBorrow<'_, T>> for Ref<T> {
+ fn from(b: RefBorrow<'_, T>) -> Self {
+ // SAFETY: The existence of `b` guarantees that the refcount is non-zero. `ManuallyDrop`
+ // guarantees that `drop` isn't called, so it's ok that the temporary `Ref` doesn't own the
+ // increment.
+ ManuallyDrop::new(unsafe { Ref::from_inner(b.inner) })
+ .deref()
+ .clone()
+ }
+}
+
+impl<T: ?Sized> Deref for RefBorrow<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: By the type invariant, the underlying object is still alive with no mutable
+ // references to it, so it is safe to create a shared reference.
+ unsafe { &self.inner.as_ref().data }
+ }
+}
+
+/// A refcounted object that is known to have a refcount of 1.
+///
+/// It is mutable and can be converted to a [`Ref`] so that it can be shared.
+///
+/// # Invariants
+///
+/// `inner` always has a reference count of 1.
+///
+/// # Examples
+///
+/// In the following example, we make changes to the inner object before turning it into a
+/// `Ref<Example>` object (after which point, it cannot be mutated directly). Note that `x.into()`
+/// cannot fail.
+///
+/// ```
+/// use kernel::sync::{Ref, UniqueRef};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn test() -> Result<Ref<Example>> {
+/// let mut x = UniqueRef::try_new(Example { a: 10, b: 20 })?;
+/// x.a += 1;
+/// x.b += 1;
+/// Ok(x.into())
+/// }
+///
+/// # test();
+/// ```
+///
+/// In the following example we first allocate memory for a ref-counted `Example` but we don't
+/// initialise it on allocation. We do initialise it later with a call to [`UniqueRef::write`],
+/// followed by a conversion to `Ref<Example>`. This is particularly useful when allocation happens
+/// in one context (e.g., sleepable) and initialisation in another (e.g., atomic):
+///
+/// ```
+/// use kernel::sync::{Ref, UniqueRef};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn test() -> Result<Ref<Example>> {
+/// let x = UniqueRef::try_new_uninit()?;
+/// Ok(x.write(Example { a: 10, b: 20 }).into())
+/// }
+///
+/// # test();
+/// ```
+///
+/// In the last example below, the caller gets a pinned instance of `Example` while converting to
+/// `Ref<Example>`; this is useful in scenarios where one needs a pinned reference during
+/// initialisation, for example, when initialising fields that are wrapped in locks.
+///
+/// ```
+/// use core::pin::Pin;
+/// use kernel::sync::{Ref, UniqueRef};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn test() -> Result<Ref<Example>> {
+/// let mut pinned = Pin::from(UniqueRef::try_new(Example { a: 10, b: 20 })?);
+/// // We can modify `pinned` because it is `Unpin`.
+/// pinned.as_mut().a += 1;
+/// Ok(pinned.into())
+/// }
+///
+/// # test();
+/// ```
+pub struct UniqueRef<T: ?Sized> {
+ inner: Ref<T>,
+}
+
+impl<T> UniqueRef<T> {
+ /// Tries to allocate a new [`UniqueRef`] instance.
+ pub fn try_new(value: T) -> Result<Self> {
+ Ok(Self {
+ // INVARIANT: The newly-created object has a ref-count of 1.
+ inner: Ref::try_new(value)?,
+ })
+ }
+
+ /// Tries to allocate a new [`UniqueRef`] instance whose contents are not initialised yet.
+ pub fn try_new_uninit() -> Result<UniqueRef<MaybeUninit<T>>> {
+ Ok(UniqueRef::<MaybeUninit<T>> {
+ // INVARIANT: The newly-created object has a ref-count of 1.
+ inner: Ref::try_new(MaybeUninit::uninit())?,
+ })
+ }
+}
+
+impl<T> UniqueRef<MaybeUninit<T>> {
+ /// Converts a `UniqueRef<MaybeUninit<T>>` into a `UniqueRef<T>` by writing a value into it.
+ pub fn write(mut self, value: T) -> UniqueRef<T> {
+ self.deref_mut().write(value);
+ let inner = ManuallyDrop::new(self).inner.ptr;
+ UniqueRef {
+ // SAFETY: The new `Ref` is taking over `ptr` from `self.inner` (which won't be
+ // dropped). The types are compatible because `MaybeUninit<T>` is compatible with `T`.
+ inner: unsafe { Ref::from_inner(inner.cast()) },
+ }
+ }
+}
+
+impl<T: ?Sized> Deref for UniqueRef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ self.inner.deref()
+ }
+}
+
+impl<T: ?Sized> DerefMut for UniqueRef<T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: By the `Ref` type invariant, there is necessarily a reference to the object, so
+ // it is safe to dereference it. Additionally, we know there is only one reference when
+ // it's inside a `UniqueRef`, so it is safe to get a mutable reference.
+ unsafe { &mut self.inner.ptr.as_mut().data }
+ }
+}
+
+/// Allows the creation of "reference-counted" globals.
+///
+/// This is achieved by biasing the refcount with +1, which ensures that the count never drops back
+/// to zero (unless buggy unsafe code incorrectly decrements without owning an increment) and
+/// therefore also ensures that `drop` is never called.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::{Ref, RefBorrow, StaticRef};
+///
+/// const VALUE: u32 = 10;
+/// static SR: StaticRef<u32> = StaticRef::new(VALUE);
+///
+/// fn takes_ref_borrow(v: RefBorrow<'_, u32>) {
+/// assert_eq!(*v, VALUE);
+/// }
+///
+/// fn takes_ref(v: Ref<u32>) {
+/// assert_eq!(*v, VALUE);
+/// }
+///
+/// takes_ref_borrow(SR.as_ref_borrow());
+/// takes_ref(SR.as_ref_borrow().into());
+/// ```
+pub struct StaticRef<T: ?Sized> {
+ inner: RefInner<T>,
+}
+
+// SAFETY: A `StaticRef<T>` is a `Ref<T>` declared statically, so we just use the same criteria for
+// making it `Sync`.
+unsafe impl<T: ?Sized + Sync + Send> Sync for StaticRef<T> {}
+
+impl<T> StaticRef<T> {
+ /// Creates a new instance of a static "ref-counted" object.
+ pub const fn new(data: T) -> Self {
+ // INVARIANT: The refcount is initialised to a non-zero value.
+ Self {
+ inner: RefInner {
+ refcount: Opaque::new(new_refcount()),
+ data,
+ },
+ }
+ }
+}
+
+impl<T: ?Sized> StaticRef<T> {
+ /// Creates a [`RefBorrow`] instance from the given static object.
+ ///
+    /// This requires a `'static` lifetime so that it can guarantee that the underlying object
+ /// remains valid and is effectively pinned.
+ pub fn as_ref_borrow(&'static self) -> RefBorrow<'static, T> {
+ // SAFETY: The static lifetime guarantees that the object remains valid. And the shared
+ // reference guarantees that no mutable references exist.
+ unsafe { RefBorrow::new(NonNull::from(&self.inner)) }
+ }
+}
+
+/// Creates, from a const context, a new instance of `struct refcount_struct` with a refcount of 1.
+///
+/// ```
+/// # // The test below is meant to ensure that `new_refcount` (which is const) mimics
+/// # // `REFCOUNT_INIT`, which is written in C and thus can't be used in a const context.
+/// # // TODO: Once `#[test]` is working, move this to a test and make `new_refcount` private.
+/// # use kernel::bindings;
+/// # // SAFETY: Just an FFI call that returns a `refcount_t` initialised to 1.
+/// # let bindings::refcount_struct {
+/// # refs: bindings::atomic_t { counter: a },
+/// # } = unsafe { bindings::REFCOUNT_INIT(1) };
+/// # let bindings::refcount_struct {
+/// # refs: bindings::atomic_t { counter: b },
+/// # } = kernel::sync::new_refcount();
+/// # assert_eq!(a, b);
+/// ```
+pub const fn new_refcount() -> bindings::refcount_struct {
+ bindings::refcount_struct {
+ refs: bindings::atomic_t { counter: 1 },
+ }
+}
diff --git a/rust/kernel/sync/condvar.rs b/rust/kernel/sync/condvar.rs
new file mode 100644
index 000000000000..b6102906fb3c
--- /dev/null
+++ b/rust/kernel/sync/condvar.rs
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A condition variable.
+//!
+//! This module allows Rust code to use the kernel's [`struct wait_queue_head`] as a condition
+//! variable.
+
+use super::{Guard, Lock, LockClassKey, LockInfo, NeedsLockClass};
+use crate::{bindings, str::CStr, task::Task, Opaque};
+use core::{marker::PhantomPinned, pin::Pin};
+
+/// Safely initialises a [`CondVar`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! condvar_init {
+ ($condvar:expr, $name:literal) => {
+ $crate::init_with_lockdep!($condvar, $name)
+ };
+}
+
+// TODO: `bindgen` is not generating this constant. Figure out why.
+const POLLFREE: u32 = 0x4000;
+
+/// Exposes the kernel's [`struct wait_queue_head`] as a condition variable. It allows the caller
+/// to atomically release the given lock and go to sleep, reacquiring the lock when it wakes up.
+/// The thread wakes up when notified by another thread (via [`CondVar::notify_one`] or
+/// [`CondVar::notify_all`]) or when it receives a signal.
+///
+/// [`struct wait_queue_head`]: ../../../include/linux/wait.h
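+///
+/// # Examples
+///
+/// An illustrative sketch of the usual wait/notify pattern. The two functions below would be
+/// driven by different threads that share the same (already initialised) mutex and condition
+/// variable; the function names are made up for this example.
+///
+/// ```
+/// use kernel::sync::{CondVar, Mutex};
+///
+/// // Sleeps until the flag becomes `true` or a signal is received.
+/// fn wait_for_flag(flag: &Mutex<bool>, cv: &CondVar) -> bool {
+///     let mut guard = flag.lock();
+///     while !*guard {
+///         // Atomically releases `flag` and sleeps; returns `true` if a signal is pending.
+///         if cv.wait(&mut guard) {
+///             return false;
+///         }
+///     }
+///     true
+/// }
+///
+/// // Sets the flag and wakes up one waiter, if any.
+/// fn set_flag(flag: &Mutex<bool>, cv: &CondVar) {
+///     *flag.lock() = true;
+///     cv.notify_one();
+/// }
+/// ```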
+pub struct CondVar {
+ pub(crate) wait_list: Opaque<bindings::wait_queue_head>,
+
+ /// A condvar needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ _pin: PhantomPinned,
+}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on any thread.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl Send for CondVar {}
+
+// SAFETY: `CondVar` only uses a `struct wait_queue_head`, which is safe to use on multiple threads
+// concurrently.
+unsafe impl Sync for CondVar {}
+
+impl CondVar {
+    /// Constructs a new condition variable.
+ ///
+ /// # Safety
+ ///
+    /// The caller must call `CondVar::init` before using the condition variable.
+ pub const unsafe fn new() -> Self {
+ Self {
+ wait_list: Opaque::uninit(),
+ _pin: PhantomPinned,
+ }
+ }
+
+ /// Atomically releases the given lock (whose ownership is proven by the guard) and puts the
+ /// thread to sleep. It wakes up when notified by [`CondVar::notify_one`] or
+ /// [`CondVar::notify_all`], or when the thread receives a signal.
+ ///
+ /// Returns whether there is a signal pending.
+    #[must_use = "wait returns whether a signal is pending, so the caller must check the return value"]
+ pub fn wait<L: Lock<I>, I: LockInfo>(&self, guard: &mut Guard<'_, L, I>) -> bool {
+ let lock = guard.lock;
+ let wait = Opaque::<bindings::wait_queue_entry>::uninit();
+
+ // SAFETY: `wait` points to valid memory.
+ unsafe { bindings::init_wait(wait.get()) };
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe {
+ bindings::prepare_to_wait_exclusive(
+ self.wait_list.get(),
+ wait.get(),
+ bindings::TASK_INTERRUPTIBLE as _,
+ )
+ };
+
+ // SAFETY: The guard is evidence that the caller owns the lock.
+ unsafe { lock.unlock(&mut guard.context) };
+
+ // SAFETY: No arguments, switches to another thread.
+ unsafe { bindings::schedule() };
+
+ guard.context = lock.lock_noguard();
+
+ // SAFETY: Both `wait` and `wait_list` point to valid memory.
+ unsafe { bindings::finish_wait(self.wait_list.get(), wait.get()) };
+
+ Task::current().signal_pending()
+ }
+
+ /// Calls the kernel function to notify the appropriate number of threads with the given flags.
+ fn notify(&self, count: i32, flags: u32) {
+ // SAFETY: `wait_list` points to valid memory.
+ unsafe {
+ bindings::__wake_up(
+ self.wait_list.get(),
+ bindings::TASK_NORMAL,
+ count,
+ flags as _,
+ )
+ };
+ }
+
+ /// Wakes a single waiter up, if any. This is not 'sticky' in the sense that if no thread is
+ /// waiting, the notification is lost completely (as opposed to automatically waking up the
+ /// next waiter).
+ pub fn notify_one(&self) {
+ self.notify(1, 0);
+ }
+
+ /// Wakes all waiters up, if any. This is not 'sticky' in the sense that if no thread is
+ /// waiting, the notification is lost completely (as opposed to automatically waking up the
+ /// next waiter).
+ pub fn notify_all(&self) {
+ self.notify(0, 0);
+ }
+
+ /// Wakes all waiters up. If they were added by `epoll`, they are also removed from the list of
+ /// waiters. This is useful when cleaning up a condition variable that may be waited on by
+ /// threads that use `epoll`.
+ pub fn free_waiters(&self) {
+ self.notify(1, bindings::POLLHUP | POLLFREE);
+ }
+}
+
+impl NeedsLockClass for CondVar {
+ fn init(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key: &'static LockClassKey,
+ _: &'static LockClassKey,
+ ) {
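+        // SAFETY: `wait_list` points to valid memory, and `name` and `key` are `'static`, so they
+        // remain valid for the lifetime of the condition variable.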
+ unsafe {
+ bindings::__init_waitqueue_head(self.wait_list.get(), name.as_char_ptr(), key.get())
+ };
+ }
+}
diff --git a/rust/kernel/sync/guard.rs b/rust/kernel/sync/guard.rs
new file mode 100644
index 000000000000..757d85eac7af
--- /dev/null
+++ b/rust/kernel/sync/guard.rs
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A generic lock guard and trait.
+//!
+//! This module contains a lock guard that can be used with any locking primitive that implements
+//! the [`Lock`] trait. It also contains the definition of the trait, which can be leveraged by
+//! other constructs to work on generic locking primitives.
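+//!
+//! # Examples
+//!
+//! An illustrative sketch of how a guard is normally obtained and used (through [`super::Mutex`]
+//! here; the function name is made up for this example):
+//!
+//! ```
+//! use kernel::sync::Mutex;
+//!
+//! fn add_one(m: &Mutex<u32>) {
+//!     // Locking returns a `Guard` that derefs to the protected data; the mutex is unlocked
+//!     // automatically when the guard goes out of scope.
+//!     let mut guard = m.lock();
+//!     *guard += 1;
+//! }
+//! ```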
+
+use super::{LockClassKey, NeedsLockClass};
+use crate::{str::CStr, Bool, False, True};
+use core::pin::Pin;
+
+/// Allows mutual exclusion primitives that implement the [`Lock`] trait to automatically unlock
+/// when a guard goes out of scope. It also provides a safe and convenient way to access the data
+/// protected by the lock.
+#[must_use = "the lock unlocks immediately when the guard is unused"]
+pub struct Guard<'a, L: Lock<I> + ?Sized, I: LockInfo = WriteLock> {
+ pub(crate) lock: &'a L,
+ pub(crate) context: L::GuardContext,
+}
+
+// SAFETY: `Guard` is sync when the data protected by the lock is also sync. This is more
+// conservative than the default compiler implementation; more details can be found on
+// <https://github.com/rust-lang/rust/issues/41622> -- it refers to `MutexGuard` from the standard
+// library.
+unsafe impl<L, I> Sync for Guard<'_, L, I>
+where
+ L: Lock<I> + ?Sized,
+ L::Inner: Sync,
+ I: LockInfo,
+{
+}
+
+impl<L: Lock<I> + ?Sized, I: LockInfo> core::ops::Deref for Guard<'_, L, I> {
+ type Target = L::Inner;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &*self.lock.locked_data().get() }
+ }
+}
+
+impl<L: Lock<I> + ?Sized, I: LockInfo<Writable = True>> core::ops::DerefMut for Guard<'_, L, I> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: The caller owns the lock, so it is safe to deref the protected data.
+ unsafe { &mut *self.lock.locked_data().get() }
+ }
+}
+
+impl<L: Lock<I> + ?Sized, I: LockInfo> Drop for Guard<'_, L, I> {
+ fn drop(&mut self) {
+ // SAFETY: The caller owns the lock, so it is safe to unlock it.
+ unsafe { self.lock.unlock(&mut self.context) };
+ }
+}
+
+impl<'a, L: Lock<I> + ?Sized, I: LockInfo> Guard<'a, L, I> {
+ /// Constructs a new immutable lock guard.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure that it owns the lock.
+ pub(crate) unsafe fn new(lock: &'a L, context: L::GuardContext) -> Self {
+ Self { lock, context }
+ }
+}
+
+/// Specifies properties of a lock.
+pub trait LockInfo {
+ /// Determines if the data protected by a lock is writable.
+ type Writable: Bool;
+}
+
+/// A marker for locks that only allow reading.
+pub struct ReadLock;
+impl LockInfo for ReadLock {
+ type Writable = False;
+}
+
+/// A marker for locks that allow reading and writing.
+pub struct WriteLock;
+impl LockInfo for WriteLock {
+ type Writable = True;
+}
+
+/// A generic mutual exclusion primitive.
+///
+/// [`Guard`] is written such that any mutual exclusion primitive that can implement this trait can
+/// also benefit from having an automatic way to unlock itself.
+///
+/// # Safety
+///
+/// - Implementers of this trait with the [`WriteLock`] marker must ensure that only one thread/CPU
+/// may access the protected data once the lock is held, that is, between calls to `lock_noguard`
+/// and `unlock`.
+/// - Implementers of all other markers must ensure that a mutable reference to the protected data
+/// is not active in any thread/CPU because at least one shared reference is active between calls
+/// to `lock_noguard` and `unlock`.
+pub unsafe trait Lock<I: LockInfo = WriteLock> {
+ /// The type of the data protected by the lock.
+ type Inner: ?Sized;
+
+ /// The type of context, if any, that needs to be stored in the guard.
+ type GuardContext;
+
+ /// Acquires the lock, making the caller its owner.
+ #[must_use]
+ fn lock_noguard(&self) -> Self::GuardContext;
+
+ /// Reacquires the lock, making the caller its owner.
+ ///
+ /// The guard context before the last unlock is passed in.
+ ///
+ /// Locks that don't require this state on relock can simply use the default implementation
+ /// that calls [`Lock::lock_noguard`].
+ fn relock(&self, ctx: &mut Self::GuardContext) {
+ *ctx = self.lock_noguard();
+ }
+
+ /// Releases the lock, giving up ownership of the lock.
+ ///
+ /// # Safety
+ ///
+ /// It must only be called by the current owner of the lock.
+ unsafe fn unlock(&self, context: &mut Self::GuardContext);
+
+ /// Returns the data protected by the lock.
+ fn locked_data(&self) -> &core::cell::UnsafeCell<Self::Inner>;
+}
+
+/// A creator of instances of a mutual exclusion (lock) primitive.
+pub trait LockFactory {
+ /// The parametrised type of the mutual exclusion primitive that can be created by this factory.
+ type LockedType<T>;
+
+ /// Constructs a new instance of the mutual exclusion primitive.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`LockIniter::init_lock`] before using the lock.
+ unsafe fn new_lock<T>(data: T) -> Self::LockedType<T>;
+}
+
+/// A lock that can be initialised with a single lock class key.
+pub trait LockIniter {
+ /// Initialises the lock instance so that it can be safely used.
+ fn init_lock(self: Pin<&mut Self>, name: &'static CStr, key: &'static LockClassKey);
+}
+
+impl<L: LockIniter> NeedsLockClass for L {
+ fn init(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key: &'static LockClassKey,
+ _: &'static LockClassKey,
+ ) {
+ self.init_lock(name, key);
+ }
+}
diff --git a/rust/kernel/sync/locked_by.rs b/rust/kernel/sync/locked_by.rs
new file mode 100644
index 000000000000..2dc6e3a77420
--- /dev/null
+++ b/rust/kernel/sync/locked_by.rs
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A wrapper for data protected by a lock that does not wrap it.
+
+use super::{Guard, Lock};
+use core::{cell::UnsafeCell, ops::Deref, ptr};
+
+/// Allows access to some data to be serialised by a lock that does not wrap it.
+///
+/// In most cases, data protected by a lock is wrapped by the appropriate lock type, e.g.,
+/// [`super::Mutex`] or [`super::SpinLock`]. [`LockedBy`] is meant for cases when this is not
+/// possible. For example, if a container has a lock and some data in the contained elements needs
+/// to be protected by the same lock.
+///
+/// [`LockedBy`] wraps the data in lieu of another locking primitive, and only allows access to it
+/// when the caller shows evidence that the 'external' lock is locked.
+///
+/// # Examples
+///
+/// The following is an example for illustrative purposes: `InnerDirectory::bytes_used` is an
+/// aggregate of all `InnerFile::bytes_used` and must be kept consistent; so we wrap `InnerFile` in
+/// a `LockedBy` so that it shares a lock with `InnerDirectory`. This allows us to enforce at
+/// compile-time that access to `InnerFile` is only granted when an `InnerDirectory` is also
+/// locked; we enforce at run time that the right `InnerDirectory` is locked.
+///
+/// ```
+/// use kernel::sync::{LockedBy, Mutex};
+///
+/// struct InnerFile {
+/// bytes_used: u64,
+/// }
+///
+/// struct File {
+/// name: String,
+/// inner: LockedBy<InnerFile, Mutex<InnerDirectory>>,
+/// }
+///
+/// struct InnerDirectory {
+/// /// The sum of the bytes used by all files.
+/// bytes_used: u64,
+/// files: Vec<File>,
+/// }
+///
+/// struct Directory {
+/// name: String,
+/// inner: Mutex<InnerDirectory>,
+/// }
+/// ```
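+///
+/// A hypothetical accessor built on the types above could then look as follows (illustrative
+/// sketch; the function name is made up):
+///
+/// ```
+/// # use kernel::sync::{Guard, LockedBy, Mutex};
+/// # struct InnerFile {
+/// #     bytes_used: u64,
+/// # }
+/// # struct File {
+/// #     name: String,
+/// #     inner: LockedBy<InnerFile, Mutex<InnerDirectory>>,
+/// # }
+/// # struct InnerDirectory {
+/// #     bytes_used: u64,
+/// #     files: Vec<File>,
+/// # }
+/// fn file_bytes_used(dir: &Guard<'_, Mutex<InnerDirectory>>, file: &File) -> u64 {
+///     // The guard proves that the directory mutex is held, so accessing the file data is allowed.
+///     file.inner.access(dir).bytes_used
+/// }
+/// ```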
+pub struct LockedBy<T: ?Sized, L: Lock + ?Sized> {
+ owner: *const L::Inner,
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `LockedBy` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send, L: Lock + ?Sized> Send for LockedBy<T, L> {}
+
+// SAFETY: `LockedBy` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send, L: Lock + ?Sized> Sync for LockedBy<T, L> {}
+
+impl<T, L: Lock + ?Sized> LockedBy<T, L> {
+ /// Constructs a new instance of [`LockedBy`].
+ ///
+ /// It stores a raw pointer to the owner that is never dereferenced. It is only used to ensure
+ /// that the right owner is being used to access the protected data. If the owner is freed, the
+ /// data becomes inaccessible; if another instance of the owner is allocated *on the same
+ /// memory location*, the data becomes accessible again: none of this affects memory safety
+ /// because in any case at most one thread (or CPU) can access the protected data at a time.
+ pub fn new(owner: &L, data: T) -> Self {
+ Self {
+ owner: owner.locked_data().get(),
+ data: UnsafeCell::new(data),
+ }
+ }
+}
+
+impl<T: ?Sized, L: Lock + ?Sized> LockedBy<T, L> {
+ /// Returns a reference to the protected data when the caller provides evidence (via a
+ /// [`Guard`]) that the owner is locked.
+ pub fn access<'a>(&'a self, guard: &'a Guard<'_, L>) -> &'a T {
+ if !ptr::eq(guard.deref(), self.owner) {
+ panic!("guard does not match owner");
+ }
+
+ // SAFETY: `guard` is evidence that the owner is locked.
+        unsafe { &*self.data.get() }
+ }
+
+ /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+ /// mutable [`Guard`]) that the owner is locked mutably.
+ pub fn access_mut<'a>(&'a self, guard: &'a mut Guard<'_, L>) -> &'a mut T {
+ if !ptr::eq(guard.deref().deref(), self.owner) {
+ panic!("guard does not match owner");
+ }
+
+ // SAFETY: `guard` is evidence that the owner is locked.
+ unsafe { &mut *self.data.get() }
+ }
+
+ /// Returns a mutable reference to the protected data when the caller provides evidence (via a
+ /// mutable owner) that the owner is locked mutably. Showing a mutable reference to the owner
+ /// is sufficient because we know no other references can exist to it.
+ pub fn access_from_mut<'a>(&'a self, owner: &'a mut L::Inner) -> &'a mut T {
+ if !ptr::eq(owner, self.owner) {
+ panic!("mismatched owners");
+ }
+
+ // SAFETY: `owner` is evidence that there is only one reference to the owner.
+ unsafe { &mut *self.data.get() }
+ }
+}
diff --git a/rust/kernel/sync/mutex.rs b/rust/kernel/sync/mutex.rs
new file mode 100644
index 000000000000..c51ae5e3a8a0
--- /dev/null
+++ b/rust/kernel/sync/mutex.rs
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel mutex.
+//!
+//! This module allows Rust code to use the kernel's [`struct mutex`].
+
+use super::{Guard, Lock, LockClassKey, LockFactory, LockIniter, WriteLock};
+use crate::{bindings, str::CStr, Opaque};
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
+
+/// Safely initialises a [`Mutex`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! mutex_init {
+ ($mutex:expr, $name:literal) => {
+ $crate::init_with_lockdep!($mutex, $name)
+ };
+}
+
+/// Exposes the kernel's [`struct mutex`]. When multiple threads attempt to lock the same mutex,
+/// only one at a time is allowed to progress, the others will block (sleep) until the mutex is
+/// unlocked, at which point another thread will be allowed to wake up and make progress.
+///
+/// A [`Mutex`] must first be initialised with a call to [`Mutex::init_lock`] before it can be
+/// used. The [`mutex_init`] macro is provided to automatically assign a new lock class to a mutex
+/// instance.
+///
+/// Since it may block, [`Mutex`] needs to be used with care in atomic contexts.
+///
+/// [`struct mutex`]: ../../../include/linux/mutex.h
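+///
+/// # Examples
+///
+/// An illustrative sketch of creating, initialising, and using a mutex (it follows the same
+/// initialisation pattern as the [`RevocableMutex`] example below; the lock class name
+/// `"example::m"` is arbitrary):
+///
+/// ```
+/// use kernel::mutex_init;
+/// use kernel::sync::Mutex;
+/// use core::pin::Pin;
+///
+/// // SAFETY: `mutex_init!` is called below before the mutex is used.
+/// let mut m = unsafe { Mutex::new(42u32) };
+/// // SAFETY: `m` is never moved after being pinned.
+/// let pinned = unsafe { Pin::new_unchecked(&mut m) };
+/// mutex_init!(pinned, "example::m");
+///
+/// *m.lock() += 1;
+/// assert_eq!(*m.lock(), 43);
+/// ```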
+pub struct Mutex<T: ?Sized> {
+ /// The kernel `struct mutex` object.
+ mutex: Opaque<bindings::mutex>,
+
+ /// A mutex needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ _pin: PhantomPinned,
+
+ /// The data protected by the mutex.
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `Mutex` can be transferred across thread boundaries iff the data it protects can.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+// SAFETY: `Mutex` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+impl<T> Mutex<T> {
+ /// Constructs a new mutex.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`Mutex::init_lock`] before using the mutex.
+ pub const unsafe fn new(t: T) -> Self {
+ Self {
+ mutex: Opaque::uninit(),
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ }
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Locks the mutex and gives the caller access to the data protected by it. Only one thread at
+ /// a time is allowed to access the protected data.
+ pub fn lock(&self) -> Guard<'_, Self> {
+ let ctx = self.lock_noguard();
+ // SAFETY: The mutex was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<T> LockFactory for Mutex<T> {
+ type LockedType<U> = Mutex<U>;
+
+ unsafe fn new_lock<U>(data: U) -> Mutex<U> {
+ // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called.
+ unsafe { Mutex::new(data) }
+ }
+}
+
+impl<T> LockIniter for Mutex<T> {
+ fn init_lock(self: Pin<&mut Self>, name: &'static CStr, key: &'static LockClassKey) {
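+        // SAFETY: `mutex` points to valid memory, and `name` and `key` are `'static`, so they
+        // remain valid for the lifetime of the mutex.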
+ unsafe { bindings::__mutex_init(self.mutex.get(), name.as_char_ptr(), key.get()) };
+ }
+}
+
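+/// An empty guard context, for locks whose guards do not need to carry any extra state.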
+pub struct EmptyGuardContext;
+
+// SAFETY: The underlying kernel `struct mutex` object ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock for Mutex<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ // SAFETY: `mutex` points to valid memory.
+ unsafe { bindings::mutex_lock(self.mutex.get()) };
+ EmptyGuardContext
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The safety requirements of the function ensure that the mutex is owned by the
+ // caller.
+ unsafe { bindings::mutex_unlock(self.mutex.get()) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+/// A revocable mutex.
+///
+/// That is, a mutex to which access can be revoked at runtime. It is a specialisation of the more
+/// generic [`super::revocable::Revocable`].
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::RevocableMutex;
+/// # use kernel::revocable_init;
+/// # use core::pin::Pin;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn read_sum(v: &RevocableMutex<Example>) -> Option<u32> {
+/// let guard = v.try_write()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// // SAFETY: We call `revocable_init` immediately below.
+/// let mut v = unsafe { RevocableMutex::new(Example { a: 10, b: 20 }) };
+/// // SAFETY: We never move out of `v`.
+/// let pinned = unsafe { Pin::new_unchecked(&mut v) };
+/// revocable_init!(pinned, "example::v");
+/// assert_eq!(read_sum(&v), Some(30));
+/// v.revoke();
+/// assert_eq!(read_sum(&v), None);
+/// ```
+pub type RevocableMutex<T> = super::revocable::Revocable<Mutex<()>, T>;
+
+/// A guard for a revocable mutex.
+pub type RevocableMutexGuard<'a, T, I = WriteLock> =
+ super::revocable::RevocableGuard<'a, Mutex<()>, T, I>;
diff --git a/rust/kernel/sync/nowait.rs b/rust/kernel/sync/nowait.rs
new file mode 100644
index 000000000000..09852d71aeb2
--- /dev/null
+++ b/rust/kernel/sync/nowait.rs
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A lock that never waits.
+
+use core::cell::UnsafeCell;
+use core::sync::atomic::{AtomicU8, Ordering};
+
+const LOCKED: u8 = 1;
+const CONTENDED: u8 = 2;
+
+/// A lock that only offers a [`try_lock`](NoWaitLock::try_lock) method.
+///
+/// That is, on contention it doesn't offer a way for the caller to block waiting for the current
+/// owner to release the lock. This is useful for best-effort kind of scenarios where waiting is
+/// never needed: in such cases, users don't need a full-featured mutex or spinlock.
+///
+/// When the lock is released via a call to [`NoWaitLockGuard::unlock`], it indicates to the caller
+/// whether there was contention (i.e., if another thread tried and failed to acquire this lock).
+/// If the return value is `false`, there was definitely no contention; if it is `true`, the
+/// contention may have happened while this caller was acquiring the lock rather than while it
+/// held it.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::sync::NoWaitLock;
+///
+/// #[derive(PartialEq)]
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// let x = NoWaitLock::new(Example { a: 10, b: 20 });
+///
+/// // Modifying the protected value.
+/// {
+/// let mut guard = x.try_lock().unwrap();
+/// assert_eq!(guard.a, 10);
+/// assert_eq!(guard.b, 20);
+/// guard.a += 20;
+/// guard.b += 20;
+/// assert_eq!(guard.a, 30);
+/// assert_eq!(guard.b, 40);
+/// }
+///
+/// // Reading the protected value.
+/// {
+/// let guard = x.try_lock().unwrap();
+/// assert_eq!(guard.a, 30);
+/// assert_eq!(guard.b, 40);
+/// }
+///
+/// // Second acquire fails, but succeeds after the guard is dropped.
+/// {
+/// let guard = x.try_lock().unwrap();
+/// assert!(x.try_lock().is_none());
+///
+/// drop(guard);
+/// assert!(x.try_lock().is_some());
+/// }
+/// ```
+///
+/// The following examples use the [`NoWaitLockGuard::unlock`] to release the lock and check for
+/// contention.
+///
+/// ```
+/// use kernel::sync::NoWaitLock;
+///
+/// #[derive(PartialEq)]
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// let x = NoWaitLock::new(Example { a: 10, b: 20 });
+///
+/// // No contention when lock is released.
+/// let guard = x.try_lock().unwrap();
+/// assert_eq!(guard.unlock(), false);
+///
+/// // Contention detected.
+/// let guard = x.try_lock().unwrap();
+/// assert!(x.try_lock().is_none());
+/// assert_eq!(guard.unlock(), true);
+///
+/// // No contention again.
+/// let guard = x.try_lock().unwrap();
+/// assert_eq!(guard.a, 10);
+/// assert_eq!(guard.b, 20);
+/// assert_eq!(guard.unlock(), false);
+/// ```
+pub struct NoWaitLock<T: ?Sized> {
+ state: AtomicU8,
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `NoWaitLock` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send> Send for NoWaitLock<T> {}
+
+// SAFETY: `NoWaitLock` only allows a single thread at a time to access the interior mutability it
+// provides, so it is `Sync` as long as the data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for NoWaitLock<T> {}
+
+impl<T> NoWaitLock<T> {
+ /// Creates a new instance of the no-wait lock.
+ pub fn new(data: T) -> Self {
+ Self {
+ state: AtomicU8::new(0),
+ data: UnsafeCell::new(data),
+ }
+ }
+}
+
+impl<T: ?Sized> NoWaitLock<T> {
+ /// Tries to acquire the lock.
+ ///
+ /// If no other thread/CPU currently owns the lock, it returns a guard that can be used to
+ /// access the protected data. Otherwise (i.e., the lock is already owned), it returns `None`.
+ pub fn try_lock(&self) -> Option<NoWaitLockGuard<'_, T>> {
+ // Fast path -- just set the LOCKED bit.
+ //
+ // Acquire ordering matches the release in `NoWaitLockGuard::drop` or
+ // `NoWaitLockGuard::unlock`.
+ if self.state.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 {
+ // INVARIANTS: The thread that manages to set the `LOCKED` bit becomes the owner.
+ return Some(NoWaitLockGuard { lock: self });
+ }
+
+ // Set the `CONTENDED` bit.
+ //
+ // If the `LOCKED` bit has since been reset, the lock was released and the caller becomes
+ // the owner of the lock. It will see the `CONTENDED` bit when it releases the lock even if
+ // there was no additional contention but this is allowed by the interface.
+ if self.state.fetch_or(CONTENDED | LOCKED, Ordering::Relaxed) & LOCKED == 0 {
+ // INVARIANTS: The thread that manages to set the `LOCKED` bit becomes the owner.
+ Some(NoWaitLockGuard { lock: self })
+ } else {
+ None
+ }
+ }
+}
+
+/// A guard for the holder of the no-wait lock.
+///
+/// # Invariants
+///
+/// Only the current owner can have an instance of [`NoWaitLockGuard`].
+pub struct NoWaitLockGuard<'a, T: ?Sized> {
+ lock: &'a NoWaitLock<T>,
+}
+
+impl<T: ?Sized> NoWaitLockGuard<'_, T> {
+ /// Unlocks the no-wait lock.
+ ///
+ /// The return value indicates whether there was contention while the lock was held, that is,
+ /// whether another thread tried (and failed) to acquire the lock.
+ pub fn unlock(self) -> bool {
+ // Matches the acquire in `NoWaitLock::try_lock`.
+ let contention = self.lock.state.swap(0, Ordering::Release) & CONTENDED != 0;
+ core::mem::forget(self);
+ contention
+ }
+}
+
+impl<T: ?Sized> core::ops::Deref for NoWaitLockGuard<'_, T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariant guarantees that only the owner has an instance of the guard,
+ // so the owner is the only one that can call this function.
+ unsafe { &*self.lock.data.get() }
+ }
+}
+
+impl<T: ?Sized> core::ops::DerefMut for NoWaitLockGuard<'_, T> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: The type invariant guarantees that only the owner has an instance of the guard,
+ // so the owner is the only one that can call this function.
+ unsafe { &mut *self.lock.data.get() }
+ }
+}
+
+impl<T: ?Sized> Drop for NoWaitLockGuard<'_, T> {
+ fn drop(&mut self) {
+ // Matches the acquire in `NoWaitLock::try_lock`.
+ self.lock.state.store(0, Ordering::Release);
+ }
+}
diff --git a/rust/kernel/sync/rcu.rs b/rust/kernel/sync/rcu.rs
new file mode 100644
index 000000000000..1a1c8ea49359
--- /dev/null
+++ b/rust/kernel/sync/rcu.rs
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! RCU support.
+//!
+//! C header: [`include/linux/rcupdate.h`](../../../../include/linux/rcupdate.h)
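+//!
+//! # Examples
+//!
+//! A minimal illustrative sketch (it assumes this module is reachable as `kernel::sync::rcu`):
+//!
+//! ```
+//! use kernel::sync::rcu;
+//!
+//! // Enter an RCU read-side critical section.
+//! let guard = rcu::read_lock();
+//! // ... RCU-protected data may be read here ...
+//! // Leave the critical section: either drop the guard or unlock it explicitly.
+//! guard.unlock();
+//! ```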
+
+use crate::bindings;
+use core::marker::PhantomData;
+
+/// Evidence that the RCU read side lock is held on the current thread/CPU.
+///
+/// The type is explicitly not `Send` because this property is per-thread/CPU.
+///
+/// # Invariants
+///
+/// The RCU read side lock is actually held while instances of this guard exist.
+pub struct Guard {
+ _not_send: PhantomData<*mut ()>,
+}
+
+impl Guard {
+ /// Acquires the RCU read side lock and returns a guard.
+ pub fn new() -> Self {
+ // SAFETY: An FFI call with no additional requirements.
+ unsafe { bindings::rcu_read_lock() };
+ // INVARIANT: The RCU read side lock was just acquired above.
+ Self {
+ _not_send: PhantomData,
+ }
+ }
+
+ /// Explicitly releases the RCU read side lock.
+ pub fn unlock(self) {}
+}
+
+impl Default for Guard {
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl Drop for Guard {
+ fn drop(&mut self) {
+ // SAFETY: By the type invariants, the rcu read side is locked, so it is ok to unlock it.
+ unsafe { bindings::rcu_read_unlock() };
+ }
+}
+
+/// Acquires the RCU read side lock.
+pub fn read_lock() -> Guard {
+ Guard::new()
+}
diff --git a/rust/kernel/sync/revocable.rs b/rust/kernel/sync/revocable.rs
new file mode 100644
index 000000000000..8fd7f07f2a65
--- /dev/null
+++ b/rust/kernel/sync/revocable.rs
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Synchronisation primitives where access to their contents can be revoked at runtime.
+
+use crate::{
+ str::CStr,
+ sync::{Guard, Lock, LockClassKey, LockFactory, LockInfo, NeedsLockClass, ReadLock, WriteLock},
+ True,
+};
+use core::{
+ mem::MaybeUninit,
+ ops::{Deref, DerefMut},
+ pin::Pin,
+};
+
+/// The state within the revocable synchronisation primitive.
+///
+/// We don't simply use `Option<T>` because the contents must be dropped in place, as they are
+/// implicitly pinned.
+///
+/// # Invariants
+///
+/// The `is_available` field determines if `data` is initialised.
+pub struct Inner<T> {
+ is_available: bool,
+ data: MaybeUninit<T>,
+}
+
+impl<T> Inner<T> {
+ fn new(data: T) -> Self {
+ // INVARIANT: `data` is initialised and `is_available` is `true`, so the state matches.
+ Self {
+ is_available: true,
+ data: MaybeUninit::new(data),
+ }
+ }
+
+ fn drop_in_place(&mut self) {
+ if !self.is_available {
+ // Already dropped.
+ return;
+ }
+
+ // INVARIANT: `data` is being dropped and `is_available` is set to `false`, so the state
+ // matches.
+ self.is_available = false;
+
+ // SAFETY: By the type invariants, `data` is valid because `is_available` was true.
+ unsafe { self.data.assume_init_drop() };
+ }
+}
+
+impl<T> Drop for Inner<T> {
+ fn drop(&mut self) {
+ self.drop_in_place();
+ }
+}
+
+/// Revocable synchronisation primitive.
+///
+/// That is, it wraps synchronisation primitives so that access to their contents can be revoked at
+/// runtime, rendering them inaccessible.
+///
+/// Once access is revoked and all concurrent users complete (i.e., all existing instances of
+/// [`RevocableGuard`] are dropped), the wrapped object is also dropped.
+///
+/// For better ergonomics, we advise the use of specialisations of this struct, for example,
+/// [`super::RevocableMutex`] and [`super::RevocableRwSemaphore`]. Callers that do not need to
+/// sleep while holding on to a guard should use [`crate::revocable::Revocable`] instead, which is
+/// more efficient as it uses RCU to keep objects alive.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::{Mutex, Revocable};
+/// # use kernel::revocable_init;
+/// # use core::pin::Pin;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn add_two(v: &Revocable<Mutex<()>, Example>) -> Option<u32> {
+/// let mut guard = v.try_write()?;
+/// guard.a += 2;
+/// guard.b += 2;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// // SAFETY: We call `revocable_init` immediately below.
+/// let mut v = unsafe { Revocable::<Mutex<()>, Example>::new(Example { a: 10, b: 20 }) };
+/// // SAFETY: We never move out of `v`.
+/// let pinned = unsafe { Pin::new_unchecked(&mut v) };
+/// revocable_init!(pinned, "example::v");
+/// assert_eq!(add_two(&v), Some(34));
+/// v.revoke();
+/// assert_eq!(add_two(&v), None);
+/// ```
+pub struct Revocable<F: LockFactory, T> {
+ inner: F::LockedType<Inner<T>>,
+}
+
+/// Safely initialises a [`Revocable`] instance with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! revocable_init {
+ ($mutex:expr, $name:literal) => {
+ $crate::init_with_lockdep!($mutex, $name)
+ };
+}
+
+impl<F: LockFactory, T> Revocable<F, T> {
+ /// Creates a new revocable instance of the given lock.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`Revocable::init`] before using the revocable synch primitive.
+ pub unsafe fn new(data: T) -> Self {
+ Self {
+ // SAFETY: The safety requirements of this function require that `Revocable::init`
+ // be called before the returned object can be used. Lock initialisation is called
+ // from `Revocable::init`.
+ inner: unsafe { F::new_lock(Inner::new(data)) },
+ }
+ }
+}
+
+impl<F: LockFactory, T> NeedsLockClass for Revocable<F, T>
+where
+ F::LockedType<Inner<T>>: NeedsLockClass,
+{
+ fn init(
+ self: Pin<&mut Self>,
+ name: &'static CStr,
+ key1: &'static LockClassKey,
+ key2: &'static LockClassKey,
+ ) {
+ // SAFETY: `inner` is pinned when `self` is.
+ let inner = unsafe { self.map_unchecked_mut(|r| &mut r.inner) };
+ inner.init(name, key1, key2);
+ }
+}
+
+impl<F: LockFactory, T> Revocable<F, T>
+where
+ F::LockedType<Inner<T>>: Lock<Inner = Inner<T>>,
+{
+ /// Revokes access to and drops the wrapped object.
+ ///
+ /// Revocation and dropping happen after ongoing accessors complete.
+ pub fn revoke(&self) {
+ self.lock().drop_in_place();
+ }
+
+ /// Tries to lock the \[revocable\] wrapped object in write (exclusive) mode.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive. Callers are allowed to sleep while holding on
+ /// to the returned guard.
+ pub fn try_write(&self) -> Option<RevocableGuard<'_, F, T, WriteLock>> {
+ let inner = self.lock();
+ if !inner.is_available {
+ return None;
+ }
+ Some(RevocableGuard::new(inner))
+ }
+
+ fn lock(&self) -> Guard<'_, F::LockedType<Inner<T>>> {
+ let ctx = self.inner.lock_noguard();
+ // SAFETY: The lock was acquired in the call above.
+ unsafe { Guard::new(&self.inner, ctx) }
+ }
+}
+
+impl<F: LockFactory, T> Revocable<F, T>
+where
+ F::LockedType<Inner<T>>: Lock<ReadLock, Inner = Inner<T>>,
+{
+ /// Tries to lock the \[revocable\] wrapped object in read (shared) mode.
+ ///
+ /// Returns `None` if the object has been revoked and is therefore no longer accessible.
+ ///
+ /// Returns a guard that gives access to the object otherwise; the object is guaranteed to
+ /// remain accessible while the guard is alive. Callers are allowed to sleep while holding on
+ /// to the returned guard.
+ pub fn try_read(&self) -> Option<RevocableGuard<'_, F, T, ReadLock>> {
+ let ctx = self.inner.lock_noguard();
+ // SAFETY: The lock was acquired in the call above.
+ let inner = unsafe { Guard::new(&self.inner, ctx) };
+ if !inner.is_available {
+ return None;
+ }
+ Some(RevocableGuard::new(inner))
+ }
+}
+
+/// A guard that allows access to a revocable object and keeps it alive.
+pub struct RevocableGuard<'a, F: LockFactory, T, I: LockInfo>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ guard: Guard<'a, F::LockedType<Inner<T>>, I>,
+}
+
+impl<'a, F: LockFactory, T, I: LockInfo> RevocableGuard<'a, F, T, I>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ fn new(guard: Guard<'a, F::LockedType<Inner<T>>, I>) -> Self {
+ Self { guard }
+ }
+}
+
+impl<F: LockFactory, T, I: LockInfo<Writable = True>> RevocableGuard<'_, F, T, I>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ /// Returns a pinned mutable reference to the wrapped object.
+ pub fn as_pinned_mut(&mut self) -> Pin<&mut T> {
+ // SAFETY: Revocable mutexes must be pinned, so we choose to always project the data as
+ // pinned as well (i.e., we guarantee we never move it).
+ unsafe { Pin::new_unchecked(&mut *self) }
+ }
+}
+
+impl<F: LockFactory, T, I: LockInfo> Deref for RevocableGuard<'_, F, T, I>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ unsafe { &*self.guard.data.as_ptr() }
+ }
+}
+
+impl<F: LockFactory, T, I: LockInfo<Writable = True>> DerefMut for RevocableGuard<'_, F, T, I>
+where
+ F::LockedType<Inner<T>>: Lock<I, Inner = Inner<T>>,
+{
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ unsafe { &mut *self.guard.data.as_mut_ptr() }
+ }
+}
diff --git a/rust/kernel/sync/rwsem.rs b/rust/kernel/sync/rwsem.rs
new file mode 100644
index 000000000000..6556d7d30d36
--- /dev/null
+++ b/rust/kernel/sync/rwsem.rs
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel read/write mutex.
+//!
+//! This module allows Rust code to use the kernel's [`struct rw_semaphore`].
+//!
+//! C header: [`include/linux/rwsem.h`](../../../../include/linux/rwsem.h)
+
+use super::{
+ mutex::EmptyGuardContext, Guard, Lock, LockClassKey, LockFactory, LockIniter, ReadLock,
+ WriteLock,
+};
+use crate::{bindings, str::CStr, Opaque};
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
+
+/// Safely initialises a [`RwSemaphore`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! rwsemaphore_init {
+ ($rwsem:expr, $name:literal) => {
+ $crate::init_with_lockdep!($rwsem, $name)
+ };
+}
+
+/// Exposes the kernel's [`struct rw_semaphore`].
+///
+/// It's a read/write mutex. That is, it allows multiple readers to acquire it concurrently, but
+/// only one writer at a time. On contention, waiters sleep.
+///
+/// A [`RwSemaphore`] must first be initialised with a call to [`RwSemaphore::init_lock`] before it
+/// can be used. The [`rwsemaphore_init`] macro is provided to automatically assign a new lock
+/// class to an [`RwSemaphore`] instance.
+///
+/// Since it may block, [`RwSemaphore`] needs to be used with care in atomic contexts.
+///
+/// [`struct rw_semaphore`]: ../../../include/linux/rwsem.h
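+///
+/// # Examples
+///
+/// An illustrative sketch following the same initialisation pattern as the other locks in this
+/// series (the lock class name `"example::sem"` is arbitrary):
+///
+/// ```
+/// use kernel::rwsemaphore_init;
+/// use kernel::sync::RwSemaphore;
+/// use core::pin::Pin;
+///
+/// // SAFETY: `rwsemaphore_init!` is called below before the rw semaphore is used.
+/// let mut sem = unsafe { RwSemaphore::new(10u32) };
+/// // SAFETY: `sem` is never moved after being pinned.
+/// let pinned = unsafe { Pin::new_unchecked(&mut sem) };
+/// rwsemaphore_init!(pinned, "example::sem");
+///
+/// *sem.write() += 1;
+/// assert_eq!(*sem.read(), 11);
+/// ```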
+pub struct RwSemaphore<T: ?Sized> {
+ /// The kernel `struct rw_semaphore` object.
+ rwsem: Opaque<bindings::rw_semaphore>,
+
+ /// An rwsem needs to be pinned because it contains a [`struct list_head`] that is
+ /// self-referential, so it cannot be safely moved once it is initialised.
+ _pin: PhantomPinned,
+
+ /// The data protected by the rwsem.
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `RwSemaphore` can be transferred across thread boundaries iff the data it protects can.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: ?Sized + Send> Send for RwSemaphore<T> {}
+
+// SAFETY: `RwSemaphore` requires that the protected type be `Sync` for it to be `Sync` as well
+// because the read mode allows multiple threads to access the protected data concurrently. It
+// requires `Send` because the write lock allows a `&mut T` to be accessible from an arbitrary
+// thread.
+unsafe impl<T: ?Sized + Send + Sync> Sync for RwSemaphore<T> {}
+
+impl<T> RwSemaphore<T> {
+ /// Constructs a new rw semaphore.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`RwSemaphore::init_lock`] before using the rw semaphore.
+ pub unsafe fn new(t: T) -> Self {
+ Self {
+ rwsem: Opaque::uninit(),
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ }
+ }
+}
+
+impl<T: ?Sized> RwSemaphore<T> {
+ /// Locks the rw semaphore in write (exclusive) mode and gives the caller access to the data
+ /// protected by it. Only one thread at a time is allowed to access the protected data.
+ pub fn write(&self) -> Guard<'_, Self> {
+ let ctx = <Self as Lock>::lock_noguard(self);
+ // SAFETY: The rw semaphore was just acquired in write mode.
+ unsafe { Guard::new(self, ctx) }
+ }
+
+    /// Locks the rw semaphore in read (shared) mode and gives the caller access to the data
+    /// protected by it. Multiple readers are allowed to access the protected data concurrently;
+    /// writers are excluded while any read lock is held.
+ pub fn read(&self) -> Guard<'_, Self, ReadLock> {
+ let ctx = <Self as Lock<ReadLock>>::lock_noguard(self);
+ // SAFETY: The rw semaphore was just acquired in read mode.
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<T> LockFactory for RwSemaphore<T> {
+ type LockedType<U> = RwSemaphore<U>;
+
+ unsafe fn new_lock<U>(data: U) -> RwSemaphore<U> {
+ // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called.
+ unsafe { RwSemaphore::new(data) }
+ }
+}
+
+impl<T> LockIniter for RwSemaphore<T> {
+ fn init_lock(self: Pin<&mut Self>, name: &'static CStr, key: &'static LockClassKey) {
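+        // SAFETY: `rwsem` points to valid memory, and `name` and `key` are `'static`, so they
+        // remain valid for the lifetime of the rw semaphore.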
+ unsafe { bindings::__init_rwsem(self.rwsem.get(), name.as_char_ptr(), key.get()) };
+ }
+}
+
+// SAFETY: The underlying kernel `struct rw_semaphore` object ensures mutual exclusion because it's
+// acquired in write mode.
+unsafe impl<T: ?Sized> Lock for RwSemaphore<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ // SAFETY: `rwsem` points to valid memory.
+ unsafe { bindings::down_write(self.rwsem.get()) };
+ EmptyGuardContext
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The safety requirements of the function ensure that the rw semaphore is owned by
+ // the caller.
+ unsafe { bindings::up_write(self.rwsem.get()) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+// SAFETY: The underlying kernel `struct rw_semaphore` object ensures that only shared references
+// are accessible from other threads because it's acquired in read mode.
+unsafe impl<T: ?Sized> Lock<ReadLock> for RwSemaphore<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ // SAFETY: `rwsem` points to valid memory.
+ unsafe { bindings::down_read(self.rwsem.get()) };
+ EmptyGuardContext
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The safety requirements of the function ensure that the rw semaphore is owned by
+ // the caller.
+ unsafe { bindings::up_read(self.rwsem.get()) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+/// A revocable rw semaphore.
+///
+/// That is, a read/write semaphore to which access can be revoked at runtime. It is a
+/// specialisation of the more generic [`super::revocable::Revocable`].
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::RevocableRwSemaphore;
+/// # use kernel::revocable_init;
+/// # use core::pin::Pin;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// fn read_sum(v: &RevocableRwSemaphore<Example>) -> Option<u32> {
+/// let guard = v.try_read()?;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// fn add_two(v: &RevocableRwSemaphore<Example>) -> Option<u32> {
+/// let mut guard = v.try_write()?;
+/// guard.a += 2;
+/// guard.b += 2;
+/// Some(guard.a + guard.b)
+/// }
+///
+/// // SAFETY: We call `revocable_init` immediately below.
+/// let mut v = unsafe { RevocableRwSemaphore::new(Example { a: 10, b: 20 }) };
+/// // SAFETY: We never move out of `v`.
+/// let pinned = unsafe { Pin::new_unchecked(&mut v) };
+/// revocable_init!(pinned, "example::v");
+/// assert_eq!(read_sum(&v), Some(30));
+/// assert_eq!(add_two(&v), Some(34));
+/// v.revoke();
+/// assert_eq!(read_sum(&v), None);
+/// assert_eq!(add_two(&v), None);
+/// ```
+pub type RevocableRwSemaphore<T> = super::revocable::Revocable<RwSemaphore<()>, T>;
+
+/// A guard for a revocable rw semaphore.
+pub type RevocableRwSemaphoreGuard<'a, T, I = WriteLock> =
+ super::revocable::RevocableGuard<'a, RwSemaphore<()>, T, I>;
diff --git a/rust/kernel/sync/seqlock.rs b/rust/kernel/sync/seqlock.rs
new file mode 100644
index 000000000000..5014e70621f6
--- /dev/null
+++ b/rust/kernel/sync/seqlock.rs
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel sequential lock (seqlock).
+//!
+//! This module allows Rust code to use the sequential locks based on the kernel's `seqcount_t` and
+//! any locks implementing the [`LockFactory`] trait.
+//!
+//! See <https://www.kernel.org/doc/Documentation/locking/seqlock.rst>.
+
+use super::{Guard, Lock, LockClassKey, LockFactory, LockIniter, NeedsLockClass, ReadLock};
+use crate::{bindings, str::CStr, Opaque};
+use core::{cell::UnsafeCell, marker::PhantomPinned, ops::Deref, pin::Pin};
+
+/// Exposes sequential locks backed by the kernel's `seqcount_t`.
+///
+/// The write-side critical section is protected by a lock implementing the [`LockFactory`] trait.
+///
+/// # Examples
+///
+/// ```
+/// use core::sync::atomic::{AtomicU32, Ordering};
+/// use kernel::sync::{SeqLock, SpinLock};
+///
+/// struct Example {
+/// a: AtomicU32,
+/// b: AtomicU32,
+/// }
+///
+/// fn get_sum(v: &SeqLock<SpinLock<Example>>) -> u32 {
+/// // Use `access` to access the fields of `Example`.
+/// v.access(|e| e.a.load(Ordering::Relaxed) + e.b.load(Ordering::Relaxed))
+/// }
+///
+/// fn get_sum_with_guard(v: &SeqLock<SpinLock<Example>>) -> u32 {
+/// // Use `read` and `need_retry` in a loop to access the fields of `Example`.
+/// loop {
+/// let guard = v.read();
+/// let sum = guard.a.load(Ordering::Relaxed) + guard.b.load(Ordering::Relaxed);
+/// if !guard.need_retry() {
+/// break sum;
+/// }
+/// }
+/// }
+///
+/// fn inc_each(v: &SeqLock<SpinLock<Example>>) {
+/// // Use a write-side guard to access the fields of `Example`.
+/// let guard = v.write();
+/// let a = guard.a.load(Ordering::Relaxed);
+/// guard.a.store(a + 1, Ordering::Relaxed);
+/// let b = guard.b.load(Ordering::Relaxed);
+/// guard.b.store(b + 1, Ordering::Relaxed);
+/// }
+/// ```
+pub struct SeqLock<L: Lock + ?Sized> {
+ _p: PhantomPinned,
+ count: Opaque<bindings::seqcount>,
+ write_lock: L,
+}
+
+// SAFETY: `SeqLock` can be transferred across thread boundaries iff the data it protects and the
+// underlying lock can.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<L: Lock + Send> Send for SeqLock<L> where L::Inner: Send {}
+
+// SAFETY: `SeqLock` allows concurrent access to the data it protects by both readers and writers,
+// so it requires that the data it protects be `Sync`, as well as the underlying lock.
+unsafe impl<L: Lock + Sync> Sync for SeqLock<L> where L::Inner: Sync {}
+
+impl<L: Lock> SeqLock<L> {
+ /// Constructs a new instance of [`SeqLock`].
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`SeqLock::init`] before using the seqlock.
+ pub unsafe fn new(data: L::Inner) -> Self
+ where
+ L: LockFactory<LockedType<L::Inner> = L>,
+ L::Inner: Sized,
+ {
+ Self {
+ _p: PhantomPinned,
+ count: Opaque::uninit(),
+ // SAFETY: `L::init_lock` is called from `SeqLock::init`, which is required to be
+ // called by the function's safety requirements.
+ write_lock: unsafe { L::new_lock(data) },
+ }
+ }
+}
+
+impl<L: Lock + ?Sized> SeqLock<L> {
+ /// Accesses the protected data in read mode.
+ ///
+ /// Readers and writers are allowed to run concurrently, so callers must check if they need to
+ /// refetch the values before they are used (e.g., because a writer changed them concurrently,
+ /// rendering them potentially inconsistent). The check is performed via calls to
+ /// [`SeqLockReadGuard::need_retry`].
+ pub fn read(&self) -> SeqLockReadGuard<'_, L> {
+ SeqLockReadGuard {
+ lock: self,
+ // SAFETY: `count` contains valid memory.
+ start_count: unsafe { bindings::read_seqcount_begin(self.count.get()) },
+ }
+ }
+
+ /// Accesses the protected data in read mode.
+ ///
+ /// The provided closure is called repeatedly if it may have accessed inconsistent data (e.g.,
+ /// because a concurrent writer modified it). This is a wrapper around [`SeqLock::read`] and
+ /// [`SeqLockReadGuard::need_retry`] in a loop.
+ pub fn access<F: Fn(&L::Inner) -> R, R>(&self, cb: F) -> R {
+ loop {
+ let guard = self.read();
+ let ret = cb(&guard);
+ if !guard.need_retry() {
+ return ret;
+ }
+ }
+ }
+
+ /// Locks the underlying lock and returns a guard that allows access to the protected data.
+ ///
+ /// The guard is not mutable, though, because readers are still allowed to access the data
+ /// concurrently. The protected data structure needs to provide interior mutability itself (e.g.,
+ /// via atomic types) for the individual fields that can be mutated.
+ pub fn write(&self) -> Guard<'_, Self, ReadLock> {
+ let ctx = self.lock_noguard();
+ // SAFETY: The seqlock was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<L: LockIniter + Lock + ?Sized> NeedsLockClass for SeqLock<L> {
+ fn init(
+ mut self: Pin<&mut Self>,
+ name: &'static CStr,
+ key1: &'static LockClassKey,
+ key2: &'static LockClassKey,
+ ) {
+ // SAFETY: `write_lock` is pinned when `self` is.
+ let pinned = unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.write_lock) };
+ pinned.init_lock(name, key1);
+ // SAFETY: `key2` is valid as it has a static lifetime.
+ unsafe { bindings::__seqcount_init(self.count.get(), name.as_char_ptr(), key2.get()) };
+ }
+}
+
+// SAFETY: The underlying lock ensures mutual exclusion.
+unsafe impl<L: Lock + ?Sized> Lock<ReadLock> for SeqLock<L> {
+ type Inner = L::Inner;
+ type GuardContext = L::GuardContext;
+
+ fn lock_noguard(&self) -> L::GuardContext {
+ let ctx = self.write_lock.lock_noguard();
+ // SAFETY: `count` contains valid memory.
+ unsafe { bindings::write_seqcount_begin(self.count.get()) };
+ ctx
+ }
+
+ fn relock(&self, ctx: &mut L::GuardContext) {
+ self.write_lock.relock(ctx);
+ // SAFETY: `count` contains valid memory.
+ unsafe { bindings::write_seqcount_begin(self.count.get()) };
+ }
+
+ unsafe fn unlock(&self, ctx: &mut L::GuardContext) {
+ // SAFETY: The safety requirements of the function ensure that lock is owned by the caller.
+ unsafe { bindings::write_seqcount_end(self.count.get()) };
+ // SAFETY: The safety requirements of the function ensure that lock is owned by the caller.
+ unsafe { self.write_lock.unlock(ctx) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<L::Inner> {
+ self.write_lock.locked_data()
+ }
+}
+
+/// Allows read-side access to data protected by a sequential lock.
+pub struct SeqLockReadGuard<'a, L: Lock + ?Sized> {
+ lock: &'a SeqLock<L>,
+ start_count: u32,
+}
+
+impl<L: Lock + ?Sized> SeqLockReadGuard<'_, L> {
+ /// Determines whether the caller needs to retry reading values.
+ ///
+ /// It returns `true` when a concurrent writer ran between the guard being created and
+ /// [`Self::need_retry`] being called.
+ pub fn need_retry(&self) -> bool {
+ // SAFETY: `count` is valid because the guard guarantees that the lock remains alive.
+ unsafe { bindings::read_seqcount_retry(self.lock.count.get(), self.start_count) != 0 }
+ }
+}
+
+impl<L: Lock + ?Sized> Deref for SeqLockReadGuard<'_, L> {
+ type Target = L::Inner;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: We only ever allow shared access to the protected data.
+ unsafe { &*self.lock.locked_data().get() }
+ }
+}
diff --git a/rust/kernel/sync/smutex.rs b/rust/kernel/sync/smutex.rs
new file mode 100644
index 000000000000..e19dd966640a
--- /dev/null
+++ b/rust/kernel/sync/smutex.rs
@@ -0,0 +1,290 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A simple mutex implementation.
+//!
+//! Unlike [`super::Mutex`], this implementation does not require pinning, so the ergonomics are
+//! much improved, though the implementation is not as feature-rich as the C-based one. The main
+//! advantage is that it doesn't impose `unsafe` blocks on callers.
+//!
+//! The mutex is made up of 2 words in addition to the data it protects. The first one is accessed
+//! concurrently by threads trying to acquire and release the mutex, it contains a "stack" of
+//! waiters and a "locked" bit; the second one is only accessible by the thread holding the mutex,
+//! it contains a queue of waiters. Waiters are moved from the stack to the queue when the mutex is
+//! next unlocked while the stack is non-empty and the queue is empty. A single waiter is popped
+//! from the wait queue when the owner of the mutex unlocks it.
+//!
+//! The initial state of the mutex is `<locked=0, stack=[], queue=[]>`, meaning that it isn't
+//! locked and both the waiter stack and queue are empty.
+//!
+//! A lock operation transitions the mutex to state `<locked=1, stack=[], queue=[]>`.
+//!
+//! An unlock operation transitions the mutex back to the initial state. However, an attempt to
+//! lock the mutex while it's already locked results in a waiter being created (on the locking
+//! thread's stack) and pushed onto the waiter stack, so the state is
+//! `<locked=1, stack=[W1], queue=[]>`.
+//!
+//! Another thread trying to lock the mutex results in another waiter being pushed onto the stack,
+//! so the state becomes `<locked=1, stack=[W2, W1], queue=[]>`.
+//!
+//! In such states (queue is empty but stack is non-empty), the unlock operation is performed in
+//! three steps:
+//! 1. The stack is popped (but the mutex remains locked), so the state is:
+//! `<locked=1, stack=[], queue=[]>`
+//! 2. The stack is turned into a queue by reversing it, so the state is:
+//! `<locked=1, stack=[], queue=[W1, W2]>`
+//! 3. Finally, the lock is released, and the first waiter is awakened, so the state is:
+//! `<locked=0, stack=[], queue=[W2]>`
+//!
+//! The mutex remains accessible to any threads attempting to lock it in any of the intermediate
+//! states above. For example, while it is locked, other threads may add waiters to the stack
+//! (which is ok because we want to release the ones on the queue first); another example is that
+//! another thread may acquire the mutex before waiter W1 in the example above; this makes the
+//! mutex unfair, but it is desirable because the thread is already running and may in fact
+//! release the lock before W1 manages to get scheduled -- it also mitigates the lock convoy
+//! problem when the releasing thread wants to immediately acquire the lock again: it will be
+//! allowed to do so (as long as W1 doesn't get to it first).
+//!
+//! When the waiter queue is non-empty, unlocking the mutex always results in the first waiter
+//! being popped from the queue and awakened.
+
+use super::{mutex::EmptyGuardContext, Guard, Lock, LockClassKey, LockFactory, LockIniter};
+use crate::{bindings, str::CStr, Opaque};
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::{cell::UnsafeCell, pin::Pin};
+
+/// The value that is OR'd into the [`Mutex::waiter_stack`] when the mutex is locked.
+const LOCKED: usize = 1;
+
+/// A simple mutex.
+///
+/// This is a mutual-exclusion primitive. It guarantees that only one thread at a time may access
+/// the data it protects. When multiple threads attempt to lock the same mutex, only one at a time
+/// is allowed to progress; the others will block (sleep) until the mutex is unlocked, at which
+/// point another thread will be allowed to wake up and make progress.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{Result, sync::Ref, sync::smutex::Mutex};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// static EXAMPLE: Mutex<Example> = Mutex::new(Example { a: 10, b: 20 });
+///
+/// fn inc_a(example: &Mutex<Example>) {
+/// let mut guard = example.lock();
+/// guard.a += 1;
+/// }
+///
+/// fn sum(example: &Mutex<Example>) -> u32 {
+/// let guard = example.lock();
+/// guard.a + guard.b
+/// }
+///
+/// fn try_new(a: u32, b: u32) -> Result<Ref<Mutex<Example>>> {
+/// Ref::try_new(Mutex::new(Example { a, b }))
+/// }
+///
+/// assert_eq!(EXAMPLE.lock().a, 10);
+/// assert_eq!(sum(&EXAMPLE), 30);
+///
+/// inc_a(&EXAMPLE);
+///
+/// assert_eq!(EXAMPLE.lock().a, 11);
+/// assert_eq!(sum(&EXAMPLE), 31);
+///
+/// # try_new(42, 43);
+/// ```
+pub struct Mutex<T: ?Sized> {
+ /// A stack of waiters.
+ ///
+ /// It is accessed atomically by threads locking/unlocking the mutex. Additionally, the
+ /// least-significant bit is used to indicate whether the mutex is locked or not.
+ waiter_stack: AtomicUsize,
+
+ /// A queue of waiters.
+ ///
+ /// This is only accessible to the holder of the mutex. When the owner of the mutex is
+ /// unlocking it, it will move waiters from the stack to the queue when the queue is empty and
+ /// the stack non-empty.
+ waiter_queue: UnsafeCell<*mut Waiter>,
+
+ /// The data protected by the mutex.
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `Mutex` can be transferred across thread boundaries iff the data it protects can.
+#[allow(clippy::non_send_fields_in_send_ty)]
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+// SAFETY: `Mutex` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+
+impl<T> Mutex<T> {
+ /// Creates a new instance of the mutex.
+ pub const fn new(data: T) -> Self {
+ Self {
+ waiter_stack: AtomicUsize::new(0),
+ waiter_queue: UnsafeCell::new(core::ptr::null_mut()),
+ data: UnsafeCell::new(data),
+ }
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Locks the mutex and gives the caller access to the data protected by it. Only one thread at
+ /// a time is allowed to access the protected data.
+ pub fn lock(&self) -> Guard<'_, Self> {
+ let ctx = self.lock_noguard();
+ // SAFETY: The mutex was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<T> LockFactory for Mutex<T> {
+ type LockedType<U> = Mutex<U>;
+
+ unsafe fn new_lock<U>(data: U) -> Mutex<U> {
+ Mutex::new(data)
+ }
+}
+
+impl<T> LockIniter for Mutex<T> {
+ fn init_lock(self: Pin<&mut Self>, _name: &'static CStr, _key: &'static LockClassKey) {}
+}
+
+// SAFETY: The mutex implementation ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock for Mutex<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ loop {
+ // Try the fast path: the caller owns the mutex if we manage to set the `LOCKED` bit.
+ //
+ // The `acquire` order matches with one of the `release` ones in `unlock`.
+ if self.waiter_stack.fetch_or(LOCKED, Ordering::Acquire) & LOCKED == 0 {
+ return EmptyGuardContext;
+ }
+
+ // Slow path: we'll likely need to wait, so initialise a local waiter struct.
+ let mut waiter = Waiter {
+ completion: Opaque::uninit(),
+ next: core::ptr::null_mut(),
+ };
+
+ // SAFETY: The completion object was just allocated on the stack and is valid for
+ // writes.
+ unsafe { bindings::init_completion(waiter.completion.get()) };
+
+ // Try to enqueue the waiter by pushing it onto the waiter stack. We want to do it
+ // only while the mutex is locked by another thread.
+ loop {
+ // We use relaxed here because we're just reading the value we'll CAS later (which
+ // has a stronger ordering on success).
+ let mut v = self.waiter_stack.load(Ordering::Relaxed);
+ if v & LOCKED == 0 {
+ // The mutex was released by another thread, so try to acquire it.
+ //
+ // The `acquire` order matches with one of the `release` ones in `unlock`.
+ v = self.waiter_stack.fetch_or(LOCKED, Ordering::Acquire);
+ if v & LOCKED == 0 {
+ return EmptyGuardContext;
+ }
+ }
+
+ waiter.next = (v & !LOCKED) as _;
+
+ // The `release` order matches with `acquire` in `unlock` when the stack is swapped
+ // out. We use release order here to ensure that the other thread can see our
+ // waiter fully initialised.
+ if self
+ .waiter_stack
+ .compare_exchange(
+ v,
+ (&mut waiter as *mut _ as usize) | LOCKED,
+ Ordering::Release,
+ Ordering::Relaxed,
+ )
+ .is_ok()
+ {
+ break;
+ }
+ }
+
+ // Wait for the owner of the lock to wake this thread up.
+ //
+ // SAFETY: Completion object was previously initialised with `init_completion` and
+ // remains valid.
+ unsafe { bindings::wait_for_completion(waiter.completion.get()) };
+ }
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The caller owns the mutex, so it is safe to manipulate the local wait queue.
+ let mut waiter = unsafe { *self.waiter_queue.get() };
+ loop {
+ // If we have a non-empty local queue of waiters, pop the first one, release the mutex,
+ // and wake it up (the popped waiter).
+ if !waiter.is_null() {
+ // SAFETY: The caller owns the mutex, so it is safe to manipulate the local wait
+ // queue.
+ unsafe { *self.waiter_queue.get() = (*waiter).next };
+
+ // The `release` order matches with one of the `acquire` ones in `lock_noguard`.
+ self.waiter_stack.fetch_and(!LOCKED, Ordering::Release);
+
+ // Wake up the first waiter.
+ //
+ // SAFETY: The completion object was initialised before being added to the waiter
+ // stack and is only removed above, just before it is completed, so it is valid for
+ // writes.
+ unsafe { bindings::complete_all((*waiter).completion.get()) };
+ return;
+ }
+
+ // Try the fast path when there are no local waiters.
+ //
+ // The `release` order matches with one of the `acquire` ones in `lock_noguard`.
+ if self
+ .waiter_stack
+ .compare_exchange(LOCKED, 0, Ordering::Release, Ordering::Relaxed)
+ .is_ok()
+ {
+ return;
+ }
+
+ // We don't have a local queue, so pull the whole stack off, reverse it, and use it as a
+ // local queue. Since we're manipulating this queue, we need to keep ownership of the
+ // mutex.
+ //
+ // The `acquire` order matches with the `release` one in `lock_noguard` where a waiter
+ // is pushed onto the stack. It ensures that we see the fully-initialised waiter.
+ let mut stack =
+ (self.waiter_stack.swap(LOCKED, Ordering::Acquire) & !LOCKED) as *mut Waiter;
+ while !stack.is_null() {
+ // SAFETY: The caller still owns the mutex, so it is safe to manipulate the
+ // elements of the old waiter stack, which will soon become the wait queue.
+ let next = unsafe { (*stack).next };
+
+ // SAFETY: Same as above.
+ unsafe { (*stack).next = waiter };
+
+ waiter = stack;
+ stack = next;
+ }
+ }
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+struct Waiter {
+ completion: Opaque<bindings::completion>,
+ next: *mut Waiter,
+}
diff --git a/rust/kernel/sync/spinlock.rs b/rust/kernel/sync/spinlock.rs
new file mode 100644
index 000000000000..b326af4196bb
--- /dev/null
+++ b/rust/kernel/sync/spinlock.rs
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A kernel spinlock.
+//!
+//! This module allows Rust code to use the kernel's [`struct spinlock`].
+//!
+//! See <https://www.kernel.org/doc/Documentation/locking/spinlocks.txt>.
+
+use super::{
+ mutex::EmptyGuardContext, Guard, Lock, LockClassKey, LockFactory, LockInfo, LockIniter,
+ WriteLock,
+};
+use crate::{bindings, str::CStr, Opaque, True};
+use core::{cell::UnsafeCell, marker::PhantomPinned, pin::Pin};
+
+/// Safely initialises a [`SpinLock`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! spinlock_init {
+ ($spinlock:expr, $name:literal) => {
+ $crate::init_with_lockdep!($spinlock, $name)
+ };
+}
+
+/// Exposes the kernel's [`spinlock_t`]. When multiple CPUs attempt to lock the same spinlock,
+/// only one at a time is allowed to progress; the others will block (spinning) until the spinlock
+/// is unlocked, at which point another CPU will be allowed to make progress.
+///
+/// A [`SpinLock`] must first be initialised with a call to [`SpinLock::init_lock`] before it can be
+/// used. The [`spinlock_init`] macro is provided to automatically assign a new lock class to a
+/// spinlock instance.
+///
+/// There are two ways to acquire the lock:
+/// - [`SpinLock::lock`], which doesn't manage interrupt state, so it should be used in only two
+/// cases: (a) when the caller knows that interrupts are disabled, or (b) when callers never use
+/// it in atomic context (e.g., interrupt handlers), in which case it is ok for interrupts to be
+/// enabled.
+/// - [`SpinLock::lock_irqdisable`], which disables interrupts if they are enabled before
+/// acquiring the lock. When the lock is released, the interrupt state is automatically returned
+/// to its value before [`SpinLock::lock_irqdisable`] was called.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::SpinLock;
+/// # use core::pin::Pin;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// // Function that acquires spinlock without changing interrupt state.
+/// fn lock_example(value: &SpinLock<Example>) {
+/// let mut guard = value.lock();
+/// guard.a = 10;
+/// guard.b = 20;
+/// }
+///
+/// // Function that acquires spinlock and disables interrupts while holding it.
+/// fn lock_irqdisable_example(value: &SpinLock<Example>) {
+/// let mut guard = value.lock_irqdisable();
+/// guard.a = 30;
+/// guard.b = 40;
+/// }
+///
+/// // Initialises a spinlock.
+/// // SAFETY: `spinlock_init` is called below.
+/// let mut value = unsafe { SpinLock::new(Example { a: 1, b: 2 }) };
+/// // SAFETY: We don't move `value`.
+/// kernel::spinlock_init!(unsafe { Pin::new_unchecked(&mut value) }, "value");
+///
+/// // Calls the example functions.
+/// assert_eq!(value.lock().a, 1);
+/// lock_example(&value);
+/// assert_eq!(value.lock().a, 10);
+/// lock_irqdisable_example(&value);
+/// assert_eq!(value.lock().a, 30);
+/// ```
+///
+/// [`spinlock_t`]: ../../../include/linux/spinlock.h
+pub struct SpinLock<T: ?Sized> {
+ spin_lock: Opaque<bindings::spinlock>,
+
+ /// Spinlocks are architecture-defined. So we conservatively require them to be pinned in case
+ /// some architecture uses self-references now or in the future.
+ _pin: PhantomPinned,
+
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `SpinLock` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send> Send for SpinLock<T> {}
+
+// SAFETY: `SpinLock` serialises the interior mutability it provides, so it is `Sync` as long as the
+// data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for SpinLock<T> {}
+
+impl<T> SpinLock<T> {
+ /// Constructs a new spinlock.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`SpinLock::init_lock`] before using the spinlock.
+ pub const unsafe fn new(t: T) -> Self {
+ Self {
+ spin_lock: Opaque::uninit(),
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ }
+ }
+}
+
+impl<T: ?Sized> SpinLock<T> {
+ /// Locks the spinlock and gives the caller access to the data protected by it. Only one thread
+ /// at a time is allowed to access the protected data.
+ pub fn lock(&self) -> Guard<'_, Self, WriteLock> {
+ let ctx = <Self as Lock<WriteLock>>::lock_noguard(self);
+ // SAFETY: The spinlock was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+
+ /// Locks the spinlock and gives the caller access to the data protected by it. Additionally it
+ /// disables interrupts (if they are enabled).
+ ///
+ /// When the lock is unlocked, the interrupt state (enabled/disabled) is restored.
+ pub fn lock_irqdisable(&self) -> Guard<'_, Self, DisabledInterrupts> {
+ let ctx = <Self as Lock<DisabledInterrupts>>::lock_noguard(self);
+ // SAFETY: The spinlock was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<T> LockFactory for SpinLock<T> {
+ type LockedType<U> = SpinLock<U>;
+
+ unsafe fn new_lock<U>(data: U) -> SpinLock<U> {
+ // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called.
+ unsafe { SpinLock::new(data) }
+ }
+}
+
+impl<T> LockIniter for SpinLock<T> {
+ fn init_lock(self: Pin<&mut Self>, name: &'static CStr, key: &'static LockClassKey) {
+ unsafe { bindings::__spin_lock_init(self.spin_lock.get(), name.as_char_ptr(), key.get()) };
+ }
+}
+
+/// A type state indicating that interrupts were disabled.
+pub struct DisabledInterrupts;
+impl LockInfo for DisabledInterrupts {
+ type Writable = True;
+}
+
+// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock for SpinLock<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ // SAFETY: `spin_lock` points to valid memory.
+ unsafe { bindings::spin_lock(self.spin_lock.get()) };
+ EmptyGuardContext
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The safety requirements of the function ensure that the spinlock is owned by
+ // the caller.
+ unsafe { bindings::spin_unlock(self.spin_lock.get()) }
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+// SAFETY: The underlying kernel `spinlock_t` object ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock<DisabledInterrupts> for SpinLock<T> {
+ type Inner = T;
+ type GuardContext = core::ffi::c_ulong;
+
+ fn lock_noguard(&self) -> core::ffi::c_ulong {
+ // SAFETY: `spin_lock` points to valid memory.
+ unsafe { bindings::spin_lock_irqsave(self.spin_lock.get()) }
+ }
+
+ unsafe fn unlock(&self, ctx: &mut core::ffi::c_ulong) {
+ // SAFETY: The safety requirements of the function ensure that the spinlock is owned by
+ // the caller.
+ unsafe { bindings::spin_unlock_irqrestore(self.spin_lock.get(), *ctx) }
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+/// Safely initialises a [`RawSpinLock`] with the given name, generating a new lock class.
+#[macro_export]
+macro_rules! rawspinlock_init {
+ ($spinlock:expr, $name:literal) => {
+ $crate::init_with_lockdep!($spinlock, $name)
+ };
+}
+
+/// Exposes the kernel's [`raw_spinlock_t`].
+///
+/// It is very similar to [`SpinLock`], except that it is guaranteed not to sleep even on RT
+/// variants of the kernel.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::sync::RawSpinLock;
+/// # use core::pin::Pin;
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// }
+///
+/// // Function that acquires the raw spinlock without changing interrupt state.
+/// fn lock_example(value: &RawSpinLock<Example>) {
+/// let mut guard = value.lock();
+/// guard.a = 10;
+/// guard.b = 20;
+/// }
+///
+/// // Function that acquires the raw spinlock and disables interrupts while holding it.
+/// fn lock_irqdisable_example(value: &RawSpinLock<Example>) {
+/// let mut guard = value.lock_irqdisable();
+/// guard.a = 30;
+/// guard.b = 40;
+/// }
+///
+/// // Initialises a raw spinlock and calls the example functions.
+/// fn spinlock_example() {
+/// // SAFETY: `rawspinlock_init` is called below.
+/// let mut value = unsafe { RawSpinLock::new(Example { a: 1, b: 2 }) };
+/// // SAFETY: We don't move `value`.
+/// kernel::rawspinlock_init!(unsafe { Pin::new_unchecked(&mut value) }, "value");
+/// lock_example(&value);
+/// lock_irqdisable_example(&value);
+/// }
+/// ```
+///
+/// [`raw_spinlock_t`]: ../../../include/linux/spinlock.h
+pub struct RawSpinLock<T: ?Sized> {
+ spin_lock: Opaque<bindings::raw_spinlock>,
+
+ // Spinlocks are architecture-defined. So we conservatively require them to be pinned in case
+ // some architecture uses self-references now or in the future.
+ _pin: PhantomPinned,
+
+ data: UnsafeCell<T>,
+}
+
+// SAFETY: `RawSpinLock` can be transferred across thread boundaries iff the data it protects can.
+unsafe impl<T: ?Sized + Send> Send for RawSpinLock<T> {}
+
+// SAFETY: `RawSpinLock` serialises the interior mutability it provides, so it is `Sync` as long as
+// the data it protects is `Send`.
+unsafe impl<T: ?Sized + Send> Sync for RawSpinLock<T> {}
+
+impl<T> RawSpinLock<T> {
+ /// Constructs a new raw spinlock.
+ ///
+ /// # Safety
+ ///
+ /// The caller must call [`RawSpinLock::init_lock`] before using the raw spinlock.
+ pub const unsafe fn new(t: T) -> Self {
+ Self {
+ spin_lock: Opaque::uninit(),
+ data: UnsafeCell::new(t),
+ _pin: PhantomPinned,
+ }
+ }
+}
+
+impl<T: ?Sized> RawSpinLock<T> {
+ /// Locks the raw spinlock and gives the caller access to the data protected by it. Only one
+ /// thread at a time is allowed to access the protected data.
+ pub fn lock(&self) -> Guard<'_, Self, WriteLock> {
+ let ctx = <Self as Lock<WriteLock>>::lock_noguard(self);
+ // SAFETY: The raw spinlock was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+
+ /// Locks the raw spinlock and gives the caller access to the data protected by it.
+ /// Additionally it disables interrupts (if they are enabled).
+ ///
+ /// When the lock is unlocked, the interrupt state (enabled/disabled) is restored.
+ pub fn lock_irqdisable(&self) -> Guard<'_, Self, DisabledInterrupts> {
+ let ctx = <Self as Lock<DisabledInterrupts>>::lock_noguard(self);
+ // SAFETY: The raw spinlock was just acquired.
+ unsafe { Guard::new(self, ctx) }
+ }
+}
+
+impl<T> LockFactory for RawSpinLock<T> {
+ type LockedType<U> = RawSpinLock<U>;
+
+ unsafe fn new_lock<U>(data: U) -> RawSpinLock<U> {
+ // SAFETY: The safety requirements of `new_lock` also require that `init_lock` be called.
+ unsafe { RawSpinLock::new(data) }
+ }
+}
+
+impl<T> LockIniter for RawSpinLock<T> {
+ fn init_lock(self: Pin<&mut Self>, name: &'static CStr, key: &'static LockClassKey) {
+ unsafe {
+ bindings::_raw_spin_lock_init(self.spin_lock.get(), name.as_char_ptr(), key.get())
+ };
+ }
+}
+
+// SAFETY: The underlying kernel `raw_spinlock_t` object ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock for RawSpinLock<T> {
+ type Inner = T;
+ type GuardContext = EmptyGuardContext;
+
+ fn lock_noguard(&self) -> EmptyGuardContext {
+ // SAFETY: `spin_lock` points to valid memory.
+ unsafe { bindings::raw_spin_lock(self.spin_lock.get()) };
+ EmptyGuardContext
+ }
+
+ unsafe fn unlock(&self, _: &mut EmptyGuardContext) {
+ // SAFETY: The safety requirements of the function ensure that the raw spinlock is owned by
+ // the caller.
+ unsafe { bindings::raw_spin_unlock(self.spin_lock.get()) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
+
+// SAFETY: The underlying kernel `raw_spinlock_t` object ensures mutual exclusion.
+unsafe impl<T: ?Sized> Lock<DisabledInterrupts> for RawSpinLock<T> {
+ type Inner = T;
+ type GuardContext = core::ffi::c_ulong;
+
+ fn lock_noguard(&self) -> core::ffi::c_ulong {
+ // SAFETY: `spin_lock` points to valid memory.
+ unsafe { bindings::raw_spin_lock_irqsave(self.spin_lock.get()) }
+ }
+
+ unsafe fn unlock(&self, ctx: &mut core::ffi::c_ulong) {
+ // SAFETY: The safety requirements of the function ensure that the raw spinlock is owned by
+ // the caller.
+ unsafe { bindings::raw_spin_unlock_irqrestore(self.spin_lock.get(), *ctx) };
+ }
+
+ fn locked_data(&self) -> &UnsafeCell<T> {
+ &self.data
+ }
+}
diff --git a/rust/kernel/sysctl.rs b/rust/kernel/sysctl.rs
new file mode 100644
index 000000000000..8f742d60037d
--- /dev/null
+++ b/rust/kernel/sysctl.rs
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! System control.
+//!
+//! C header: [`include/linux/sysctl.h`](../../../../include/linux/sysctl.h)
+//!
+//! Reference: <https://www.kernel.org/doc/Documentation/sysctl/README>
+
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::mem;
+use core::ptr;
+use core::sync::atomic;
+
+use crate::{
+ bindings,
+ error::code::*,
+ io_buffer::IoBufferWriter,
+ str::CStr,
+ types,
+ user_ptr::{UserSlicePtr, UserSlicePtrWriter},
+ Result,
+};
+
+/// Sysctl storage.
+pub trait SysctlStorage: Sync {
+ /// Writes a byte slice.
+ fn store_value(&self, data: &[u8]) -> (usize, Result);
+
+ /// Reads via a [`UserSlicePtrWriter`].
+ fn read_value(&self, data: &mut UserSlicePtrWriter) -> (usize, Result);
+}
+
+fn trim_whitespace(mut data: &[u8]) -> &[u8] {
+ while !data.is_empty() && (data[0] == b' ' || data[0] == b'\t' || data[0] == b'\n') {
+ data = &data[1..];
+ }
+ while !data.is_empty()
+ && (data[data.len() - 1] == b' '
+ || data[data.len() - 1] == b'\t'
+ || data[data.len() - 1] == b'\n')
+ {
+ data = &data[..data.len() - 1];
+ }
+ data
+}
+
+impl<T> SysctlStorage for &T
+where
+ T: SysctlStorage,
+{
+ fn store_value(&self, data: &[u8]) -> (usize, Result) {
+ (*self).store_value(data)
+ }
+
+ fn read_value(&self, data: &mut UserSlicePtrWriter) -> (usize, Result) {
+ (*self).read_value(data)
+ }
+}
+
+impl SysctlStorage for atomic::AtomicBool {
+ fn store_value(&self, data: &[u8]) -> (usize, Result) {
+ let result = match trim_whitespace(data) {
+ b"0" => {
+ self.store(false, atomic::Ordering::Relaxed);
+ Ok(())
+ }
+ b"1" => {
+ self.store(true, atomic::Ordering::Relaxed);
+ Ok(())
+ }
+ _ => Err(EINVAL),
+ };
+ (data.len(), result)
+ }
+
+ fn read_value(&self, data: &mut UserSlicePtrWriter) -> (usize, Result) {
+ let value = if self.load(atomic::Ordering::Relaxed) {
+ b"1\n"
+ } else {
+ b"0\n"
+ };
+ (value.len(), data.write_slice(value))
+ }
+}
+
+/// Holds a single `sysctl` entry (and its table).
+pub struct Sysctl<T: SysctlStorage> {
+ inner: Box<T>,
+ // Responsible for keeping the `ctl_table` alive.
+ _table: Box<[bindings::ctl_table]>,
+ header: *mut bindings::ctl_table_header,
+}
+
+// SAFETY: The only public method we have is `get()`, which returns `&T`, and
+// `T: Sync`. Any new methods must adhere to this requirement.
+unsafe impl<T: SysctlStorage> Sync for Sysctl<T> {}
+
+unsafe extern "C" fn proc_handler<T: SysctlStorage>(
+ ctl: *mut bindings::ctl_table,
+ write: core::ffi::c_int,
+ buffer: *mut core::ffi::c_void,
+ len: *mut usize,
+ ppos: *mut bindings::loff_t,
+) -> core::ffi::c_int {
+ // If we are reading from some offset other than the beginning of the file,
+ // return an empty read to signal EOF.
+ if unsafe { *ppos } != 0 && write == 0 {
+ unsafe { *len = 0 };
+ return 0;
+ }
+
+ let data = unsafe { UserSlicePtr::new(buffer, *len) };
+ let storage = unsafe { &*((*ctl).data as *const T) };
+ let (bytes_processed, result) = if write != 0 {
+ let data = match data.read_all() {
+ Ok(r) => r,
+ Err(e) => return e.to_kernel_errno(),
+ };
+ storage.store_value(&data)
+ } else {
+ let mut writer = data.writer();
+ storage.read_value(&mut writer)
+ };
+ unsafe { *len = bytes_processed };
+ unsafe { *ppos += *len as bindings::loff_t };
+ match result {
+ Ok(()) => 0,
+ Err(e) => e.to_kernel_errno(),
+ }
+}
+
+impl<T: SysctlStorage> Sysctl<T> {
+ /// Registers a single entry in `sysctl`.
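+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch using the [`SysctlStorage`] implementation for `AtomicBool` above; the
+ /// directory (`rust`), entry name (`example`) and mode are made-up illustrative values:
+ ///
+ /// ```
+ /// use core::sync::atomic::AtomicBool;
+ /// use kernel::{c_str, sysctl::Sysctl, types::Mode, Result};
+ ///
+ /// fn register_example() -> Result<Sysctl<AtomicBool>> {
+ ///     // Becomes visible as `/proc/sys/rust/example`; dropping the returned value
+ ///     // unregisters the entry again.
+ ///     Sysctl::register(
+ ///         c_str!("rust"),
+ ///         c_str!("example"),
+ ///         AtomicBool::new(false),
+ ///         Mode::from_int(0o644),
+ ///     )
+ /// }
+ /// ```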
+ pub fn register(
+ path: &'static CStr,
+ name: &'static CStr,
+ storage: T,
+ mode: types::Mode,
+ ) -> Result<Sysctl<T>> {
+ if name.contains(&b'/') {
+ return Err(EINVAL);
+ }
+
+ let storage = Box::try_new(storage)?;
+ let mut table = Vec::try_with_capacity(2)?;
+ table.try_push(bindings::ctl_table {
+ procname: name.as_char_ptr(),
+ mode: mode.as_int(),
+ data: &*storage as *const T as *mut core::ffi::c_void,
+ proc_handler: Some(proc_handler::<T>),
+
+ maxlen: 0,
+ child: ptr::null_mut(),
+ poll: ptr::null_mut(),
+ extra1: ptr::null_mut(),
+ extra2: ptr::null_mut(),
+ })?;
+ table.try_push(unsafe { mem::zeroed() })?;
+ let mut table = table.try_into_boxed_slice()?;
+
+ let result = unsafe { bindings::register_sysctl(path.as_char_ptr(), table.as_mut_ptr()) };
+ if result.is_null() {
+ return Err(ENOMEM);
+ }
+
+ Ok(Sysctl {
+ inner: storage,
+ _table: table,
+ header: result,
+ })
+ }
+
+ /// Gets the storage.
+ pub fn get(&self) -> &T {
+ &self.inner
+ }
+}
+
+impl<T: SysctlStorage> Drop for Sysctl<T> {
+ fn drop(&mut self) {
+ unsafe {
+ bindings::unregister_sysctl_table(self.header);
+ }
+ self.header = ptr::null_mut();
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_trim_whitespace() {
+ assert_eq!(trim_whitespace(b"foo "), b"foo");
+ assert_eq!(trim_whitespace(b" foo"), b"foo");
+ assert_eq!(trim_whitespace(b" foo "), b"foo");
+ }
+}
diff --git a/rust/kernel/task.rs b/rust/kernel/task.rs
new file mode 100644
index 000000000000..67040b532816
--- /dev/null
+++ b/rust/kernel/task.rs
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Tasks (threads and processes).
+//!
+//! C header: [`include/linux/sched.h`](../../../../include/linux/sched.h).
+
+use crate::{
+ bindings, c_str, error::from_kernel_err_ptr, types::PointerWrapper, ARef, AlwaysRefCounted,
+ Result, ScopeGuard,
+};
+use alloc::boxed::Box;
+use core::{cell::UnsafeCell, fmt, marker::PhantomData, ops::Deref, ptr};
+
+/// Wraps the kernel's `struct task_struct`.
+///
+/// # Invariants
+///
+/// Instances of this type are always ref-counted, that is, a call to `get_task_struct` ensures
+/// that the allocation remains valid at least until the matching call to `put_task_struct`.
+///
+/// # Examples
+///
+/// The following is an example of getting the PID of the current thread with zero additional cost
+/// when compared to the C version:
+///
+/// ```
+/// use kernel::task::Task;
+///
+/// let pid = Task::current().pid();
+/// ```
+///
+/// Getting the PID of the current process, also zero additional cost:
+///
+/// ```
+/// use kernel::task::Task;
+///
+/// let pid = Task::current().group_leader().pid();
+/// ```
+///
+/// Getting the current task and storing it in some struct. The reference count is automatically
+/// incremented when creating `State` and decremented when it is dropped:
+///
+/// ```
+/// use kernel::{task::Task, ARef};
+///
+/// struct State {
+/// creator: ARef<Task>,
+/// index: u32,
+/// }
+///
+/// impl State {
+/// fn new() -> Self {
+/// Self {
+/// creator: Task::current().into(),
+/// index: 0,
+/// }
+/// }
+/// }
+/// ```
+#[repr(transparent)]
+pub struct Task(pub(crate) UnsafeCell<bindings::task_struct>);
+
+// SAFETY: It's OK to access `Task` through references from other threads because we're either
+// accessing properties that don't change (e.g., `pid`, `group_leader`) or that are properly
+// synchronised by C code (e.g., `signal_pending`).
+unsafe impl Sync for Task {}
+
+/// The type of process identifiers (PIDs).
+type Pid = bindings::pid_t;
+
+impl Task {
+ /// Returns a task reference for the currently executing task/thread.
+ pub fn current<'a>() -> TaskRef<'a> {
+ // SAFETY: Just an FFI call.
+ let ptr = unsafe { bindings::get_current() };
+
+ TaskRef {
+ // SAFETY: If the current thread is still running, the current task is valid. Given
+ // that `TaskRef` is not `Send`, we know it cannot be transferred to another thread
+ // (where it could potentially outlive the caller).
+ task: unsafe { &*ptr.cast() },
+ _not_send: PhantomData,
+ }
+ }
+
+ /// Returns the group leader of the given task.
+ pub fn group_leader(&self) -> &Task {
+ // SAFETY: By the type invariant, we know that `self.0` is valid.
+ let ptr = unsafe { core::ptr::addr_of!((*self.0.get()).group_leader).read() };
+
+ // SAFETY: The lifetime of the returned task reference is tied to the lifetime of `self`,
+ // and given that a task has a reference to its group leader, we know it must be valid for
+ // the lifetime of the returned task reference.
+ unsafe { &*ptr.cast() }
+ }
+
+ /// Returns the PID of the given task.
+ pub fn pid(&self) -> Pid {
+ // SAFETY: By the type invariant, we know that `self.0` is valid.
+ unsafe { core::ptr::addr_of!((*self.0.get()).pid).read() }
+ }
+
+ /// Determines whether the given task has pending signals.
+ pub fn signal_pending(&self) -> bool {
+ // SAFETY: By the type invariant, we know that `self.0` is valid.
+ unsafe { bindings::signal_pending(self.0.get()) != 0 }
+ }
+
+ /// Starts a new kernel thread and runs it.
+ ///
+ /// # Examples
+ ///
+ /// Launches 10 threads and waits for them to complete.
+ ///
+ /// ```
+ /// use core::sync::atomic::{AtomicU32, Ordering};
+ /// use kernel::sync::{CondVar, Mutex};
+ /// use kernel::task::Task;
+ ///
+ /// kernel::init_static_sync! {
+ /// static COUNT: Mutex<u32> = 0;
+ /// static COUNT_IS_ZERO: CondVar;
+ /// }
+ ///
+ /// fn threadfn() {
+ /// pr_info!("Running from thread {}\n", Task::current().pid());
+ /// let mut guard = COUNT.lock();
+ /// *guard -= 1;
+ /// if *guard == 0 {
+ /// COUNT_IS_ZERO.notify_all();
+ /// }
+ /// }
+ ///
+ /// // Set count to 10 and spawn 10 threads.
+ /// *COUNT.lock() = 10;
+ /// for i in 0..10 {
+ /// Task::spawn(fmt!("test{i}"), threadfn).unwrap();
+ /// }
+ ///
+ /// // Wait for count to drop to zero.
+ /// let mut guard = COUNT.lock();
+ /// while (*guard != 0) {
+ /// COUNT_IS_ZERO.wait(&mut guard);
+ /// }
+ /// ```
+ pub fn spawn<T: FnOnce() + Send + 'static>(
+ name: fmt::Arguments<'_>,
+ func: T,
+ ) -> Result<ARef<Task>> {
+ unsafe extern "C" fn threadfn<T: FnOnce() + Send + 'static>(
+ arg: *mut core::ffi::c_void,
+ ) -> core::ffi::c_int {
+ // SAFETY: The thread argument is always a `Box<T>` because it is only called via the
+ // thread creation below.
+ let bfunc = unsafe { Box::<T>::from_pointer(arg) };
+ bfunc();
+ 0
+ }
+
+ let arg = Box::try_new(func)?.into_pointer();
+
+ // SAFETY: `arg` was just created with a call to `into_pointer` above.
+ let guard = ScopeGuard::new(|| unsafe {
+ Box::<T>::from_pointer(arg);
+ });
+
+ // SAFETY: The function pointer is always valid (as long as the module remains loaded).
+ // Ownership of `arg` is transferred to the new thread (if one is actually created), so it
+ // remains valid. Lastly, the C format string is a constant ("%pA") that requires exactly one
+ // extra argument: a pointer to the `fmt::Arguments` holding the thread name.
+ let ktask = from_kernel_err_ptr(unsafe {
+ bindings::kthread_create_on_node(
+ Some(threadfn::<T>),
+ arg as _,
+ bindings::NUMA_NO_NODE,
+ c_str!("%pA").as_char_ptr(),
+ &name as *const _ as *const core::ffi::c_void,
+ )
+ })?;
+
+ // SAFETY: Since the kthread creation succeeded and we haven't run it yet, we know the task
+ // is valid.
+ let task: ARef<_> = unsafe { &*(ktask as *const Task) }.into();
+
+ // Wakes up the thread, otherwise it won't run.
+ task.wake_up();
+
+ guard.dismiss();
+ Ok(task)
+ }
+
+ /// Wakes up the task.
+ pub fn wake_up(&self) {
+ // SAFETY: By the type invariant, we know that `self.0.get()` is non-null and valid.
+ // And `wake_up_process` is safe to be called for any valid task, even if the task is
+ // running.
+ unsafe { bindings::wake_up_process(self.0.get()) };
+ }
+}
+
+// SAFETY: The type invariants guarantee that `Task` is always ref-counted.
+unsafe impl AlwaysRefCounted for Task {
+ fn inc_ref(&self) {
+ // SAFETY: The existence of a shared reference means that the refcount is nonzero.
+ unsafe { bindings::get_task_struct(self.0.get()) };
+ }
+
+ unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
+ // SAFETY: The safety requirements guarantee that the refcount is nonzero.
+ unsafe { bindings::put_task_struct(obj.cast().as_ptr()) }
+ }
+}
+
+/// A wrapper for a shared reference to [`Task`] that isn't [`Send`].
+///
+/// We make this explicitly not [`Send`] so that we can use it to represent the current thread
+/// without having to increment/decrement the task's reference count.
+///
+/// # Invariants
+///
+/// The wrapped [`Task`] remains valid for the lifetime of the object.
+pub struct TaskRef<'a> {
+ task: &'a Task,
+ _not_send: PhantomData<*mut ()>,
+}
+
+impl Deref for TaskRef<'_> {
+ type Target = Task;
+
+ fn deref(&self) -> &Self::Target {
+ self.task
+ }
+}
+
+impl From<TaskRef<'_>> for ARef<Task> {
+ fn from(t: TaskRef<'_>) -> Self {
+ t.deref().into()
+ }
+}
diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs
new file mode 100644
index 000000000000..b1f73aeece0b
--- /dev/null
+++ b/rust/kernel/types.rs
@@ -0,0 +1,705 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel types.
+//!
+//! C header: [`include/linux/types.h`](../../../../include/linux/types.h)
+
+use crate::{
+ bindings,
+ sync::{Ref, RefBorrow},
+};
+use alloc::boxed::Box;
+use core::{
+ cell::UnsafeCell,
+ marker::PhantomData,
+ mem::MaybeUninit,
+ ops::{self, Deref, DerefMut},
+ pin::Pin,
+ ptr::NonNull,
+};
+
+/// Permissions.
+///
+/// C header: [`include/uapi/linux/stat.h`](../../../../include/uapi/linux/stat.h)
+///
+/// C header: [`include/linux/stat.h`](../../../../include/linux/stat.h)
+pub struct Mode(bindings::umode_t);
+
+impl Mode {
+ /// Creates a [`Mode`] from an integer.
+ pub fn from_int(m: u16) -> Mode {
+ Mode(m)
+ }
+
+ /// Returns the mode as an integer.
+ pub fn as_int(&self) -> u16 {
+ self.0
+ }
+}
+
+/// Used to convert an object into a raw pointer that represents it.
+///
+/// It can eventually be converted back into the object. This is used to store objects as pointers
+/// in kernel data structures, for example, an implementation of
+/// [`Operations`][crate::file::Operations] in `struct
+/// file::private_data`.
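+///
+/// # Examples
+///
+/// A minimal round-trip sketch using the [`Ref`] implementation provided below; the helper
+/// function is purely illustrative:
+///
+/// ```
+/// use kernel::sync::Ref;
+/// use kernel::types::PointerWrapper;
+///
+/// fn round_trip() -> kernel::Result {
+///     let r = Ref::try_new(42u32)?;
+///     // Turn the ref-counted object into a raw pointer, e.g. to store it in a C struct.
+///     let ptr = r.into_pointer();
+///     // SAFETY: `ptr` came from the `into_pointer` call above and is consumed exactly once.
+///     let r = unsafe { <Ref<u32> as PointerWrapper>::from_pointer(ptr) };
+///     assert_eq!(*r, 42);
+///     Ok(())
+/// }
+///
+/// # assert!(round_trip().is_ok());
+/// ```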
+pub trait PointerWrapper {
+ /// Type of values borrowed between calls to [`PointerWrapper::into_pointer`] and
+ /// [`PointerWrapper::from_pointer`].
+ type Borrowed<'a>;
+
+ /// Returns the raw pointer.
+ fn into_pointer(self) -> *const core::ffi::c_void;
+
+ /// Returns a borrowed value.
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must have been returned by a previous call to [`PointerWrapper::into_pointer`].
+ /// Additionally, [`PointerWrapper::from_pointer`] can only be called after *all* values
+ /// returned by [`PointerWrapper::borrow`] have been dropped.
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a>;
+
+ /// Returns a mutably borrowed value.
+ ///
+ /// # Safety
+ ///
+ /// The passed pointer must come from a previous call to [`PointerWrapper::into_pointer`], and no
+ /// other concurrent users of the pointer (except the ones derived from the returned value) may
+ /// run, at least until the returned [`ScopeGuard`] is dropped.
+ unsafe fn borrow_mut<T: PointerWrapper>(ptr: *const core::ffi::c_void) -> ScopeGuard<T, fn(T)> {
+ // SAFETY: The safety requirements ensure that `ptr` came from a previous call to
+ // `into_pointer`.
+ ScopeGuard::new_with_data(unsafe { T::from_pointer(ptr) }, |d| {
+ d.into_pointer();
+ })
+ }
+
+ /// Returns the instance back from the raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The passed pointer must come from a previous call to [`PointerWrapper::into_pointer()`].
+ unsafe fn from_pointer(ptr: *const core::ffi::c_void) -> Self;
+}
+
+impl<T: 'static> PointerWrapper for Box<T> {
+ type Borrowed<'a> = &'a T;
+
+ fn into_pointer(self) -> *const core::ffi::c_void {
+ Box::into_raw(self) as _
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> &'a T {
+ // SAFETY: The safety requirements for this function ensure that the object is still alive,
+ // so it is safe to dereference the raw pointer.
+ // The safety requirements also ensure that the object remains alive for the lifetime of
+ // the returned value.
+ unsafe { &*ptr.cast() }
+ }
+
+ unsafe fn from_pointer(ptr: *const core::ffi::c_void) -> Self {
+ // SAFETY: The passed pointer comes from a previous call to [`Self::into_pointer()`].
+ unsafe { Box::from_raw(ptr as _) }
+ }
+}
+
+impl<T: 'static> PointerWrapper for Ref<T> {
+ type Borrowed<'a> = RefBorrow<'a, T>;
+
+ fn into_pointer(self) -> *const core::ffi::c_void {
+ Ref::into_usize(self) as _
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> RefBorrow<'a, T> {
+ // SAFETY: The safety requirements for this function ensure that the underlying object
+ // remains valid for the lifetime of the returned value.
+ unsafe { Ref::borrow_usize(ptr as _) }
+ }
+
+ unsafe fn from_pointer(ptr: *const core::ffi::c_void) -> Self {
+ // SAFETY: The passed pointer comes from a previous call to [`Self::into_pointer()`].
+ unsafe { Ref::from_usize(ptr as _) }
+ }
+}
+
+impl<T: PointerWrapper + Deref> PointerWrapper for Pin<T> {
+ type Borrowed<'a> = T::Borrowed<'a>;
+
+ fn into_pointer(self) -> *const core::ffi::c_void {
+ // SAFETY: We continue to treat the pointer as pinned by returning just a pointer to it to
+ // the caller.
+ let inner = unsafe { Pin::into_inner_unchecked(self) };
+ inner.into_pointer()
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a> {
+ // SAFETY: The safety requirements for this function are the same as the ones for
+ // `T::borrow`.
+ unsafe { T::borrow(ptr) }
+ }
+
+ unsafe fn from_pointer(p: *const core::ffi::c_void) -> Self {
+ // SAFETY: The object was originally pinned.
+ // The passed pointer comes from a previous call to `inner::into_pointer()`.
+ unsafe { Pin::new_unchecked(T::from_pointer(p)) }
+ }
+}
+
+impl<T> PointerWrapper for *mut T {
+ type Borrowed<'a> = *mut T;
+
+ fn into_pointer(self) -> *const core::ffi::c_void {
+ self as _
+ }
+
+ unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Self::Borrowed<'a> {
+ ptr as _
+ }
+
+ unsafe fn from_pointer(ptr: *const core::ffi::c_void) -> Self {
+ ptr as _
+ }
+}
+
+impl PointerWrapper for () {
+ type Borrowed<'a> = ();
+
+ fn into_pointer(self) -> *const core::ffi::c_void {
+ // We use 1 to be different from a null pointer.
+ 1usize as _
+ }
+
+ unsafe fn borrow<'a>(_: *const core::ffi::c_void) -> Self::Borrowed<'a> {}
+
+ unsafe fn from_pointer(_: *const core::ffi::c_void) -> Self {}
+}
+
+/// Runs a cleanup function/closure when dropped.
+///
+/// The [`ScopeGuard::dismiss`] function prevents the cleanup function from running.
+///
+/// # Examples
+///
+/// In the example below, we have multiple exit paths and we want to log regardless of which one is
+/// taken:
+/// ```
+/// # use kernel::ScopeGuard;
+/// fn example1(arg: bool) {
+/// let _log = ScopeGuard::new(|| pr_info!("example1 completed\n"));
+///
+/// if arg {
+/// return;
+/// }
+///
+/// pr_info!("Do something...\n");
+/// }
+///
+/// # example1(false);
+/// # example1(true);
+/// ```
+///
+/// In the example below, we want to log the same message on all early exits but a different one on
+/// the main exit path:
+/// ```
+/// # use kernel::ScopeGuard;
+/// fn example2(arg: bool) {
+/// let log = ScopeGuard::new(|| pr_info!("example2 returned early\n"));
+///
+/// if arg {
+/// return;
+/// }
+///
+/// // (Other early returns...)
+///
+/// log.dismiss();
+/// pr_info!("example2 no early return\n");
+/// }
+///
+/// # example2(false);
+/// # example2(true);
+/// ```
+///
+/// In the example below, we need a mutable object (the vector) to be accessible within the log
+/// function, so we wrap it in the [`ScopeGuard`]:
+/// ```
+/// # use kernel::ScopeGuard;
+/// fn example3(arg: bool) -> Result {
+/// let mut vec =
+/// ScopeGuard::new_with_data(Vec::new(), |v| pr_info!("vec had {} elements\n", v.len()));
+///
+/// vec.try_push(10u8)?;
+/// if arg {
+/// return Ok(());
+/// }
+/// vec.try_push(20u8)?;
+/// Ok(())
+/// }
+///
+/// # assert_eq!(example3(false), Ok(()));
+/// # assert_eq!(example3(true), Ok(()));
+/// ```
+///
+/// # Invariants
+///
+/// The value stored in the struct is nearly always `Some(_)`, except between
+/// [`ScopeGuard::dismiss`] and [`ScopeGuard::drop`]: in this case, it will be `None` as the value
+/// will have been returned to the caller. Since [`ScopeGuard::dismiss`] consumes the guard,
+/// callers won't be able to use it anymore.
+pub struct ScopeGuard<T, F: FnOnce(T)>(Option<(T, F)>);
+
+impl<T, F: FnOnce(T)> ScopeGuard<T, F> {
+ /// Creates a new guarded object wrapping the given data and with the given cleanup function.
+ pub fn new_with_data(data: T, cleanup_func: F) -> Self {
+ // INVARIANT: The struct is being initialised with `Some(_)`.
+ Self(Some((data, cleanup_func)))
+ }
+
+ /// Prevents the cleanup function from running and returns the guarded data.
+ pub fn dismiss(mut self) -> T {
+ // INVARIANT: This is the exception case in the invariant; it is not visible to callers
+ // because this function consumes `self`.
+ self.0.take().unwrap().0
+ }
+}
+
+impl ScopeGuard<(), Box<dyn FnOnce(())>> {
+ /// Creates a new guarded object with the given cleanup function.
+ pub fn new(cleanup: impl FnOnce()) -> ScopeGuard<(), impl FnOnce(())> {
+ ScopeGuard::new_with_data((), move |_| cleanup())
+ }
+}
+
+impl<T, F: FnOnce(T)> Deref for ScopeGuard<T, F> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ // The type invariants guarantee that `unwrap` will succeed.
+ &self.0.as_ref().unwrap().0
+ }
+}
+
+impl<T, F: FnOnce(T)> DerefMut for ScopeGuard<T, F> {
+ fn deref_mut(&mut self) -> &mut T {
+ // The type invariants guarantee that `unwrap` will succeed.
+ &mut self.0.as_mut().unwrap().0
+ }
+}
+
+impl<T, F: FnOnce(T)> Drop for ScopeGuard<T, F> {
+ fn drop(&mut self) {
+ // Run the cleanup function if one is still present.
+ if let Some((data, cleanup)) = self.0.take() {
+ cleanup(data)
+ }
+ }
+}
+
+/// Stores an opaque value.
+///
+/// This is meant to be used with FFI objects that are never interpreted by Rust code.
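+///
+/// # Examples
+///
+/// A typical pattern, sketched here with the kernel's `struct completion` as an arbitrary
+/// example, is to reserve storage for a C object and pass a raw pointer to it to C code for
+/// in-place initialisation:
+///
+/// ```ignore
+/// use kernel::{bindings, Opaque};
+///
+/// // Reserve properly sized and aligned storage for a C `struct completion`.
+/// let c = Opaque::<bindings::completion>::uninit();
+/// // SAFETY: `c.get()` points to valid storage for a `struct completion`.
+/// unsafe { bindings::init_completion(c.get()) };
+/// ```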
+#[repr(transparent)]
+pub struct Opaque<T>(MaybeUninit<UnsafeCell<T>>);
+
+impl<T> Opaque<T> {
+ /// Creates a new opaque value.
+ pub const fn new(value: T) -> Self {
+ Self(MaybeUninit::new(UnsafeCell::new(value)))
+ }
+
+ /// Creates an uninitialised value.
+ pub const fn uninit() -> Self {
+ Self(MaybeUninit::uninit())
+ }
+
+ /// Returns a raw pointer to the opaque data.
+ pub fn get(&self) -> *mut T {
+ UnsafeCell::raw_get(self.0.as_ptr())
+ }
+}
+
+/// A bitmask.
+///
+/// It has a restriction that all bits must be the same, except one. For example, `0b1110111` and
+/// `0b1000` are acceptable masks.
+#[derive(Clone, Copy)]
+pub struct Bit<T> {
+ index: T,
+ inverted: bool,
+}
+
+/// Creates a bit mask with a single bit set.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::bit;
+/// let mut x = 0xfeu32;
+///
+/// assert_eq!(x & bit(0), 0);
+/// assert_eq!(x & bit(1), 2);
+/// assert_eq!(x & bit(2), 4);
+/// assert_eq!(x & bit(3), 8);
+///
+/// x |= bit(0);
+/// assert_eq!(x, 0xff);
+///
+/// x &= !bit(1);
+/// assert_eq!(x, 0xfd);
+///
+/// x &= !bit(7);
+/// assert_eq!(x, 0x7d);
+///
+/// let y: u64 = bit(34).into();
+/// assert_eq!(y, 0x400000000);
+///
+/// assert_eq!(y | bit(35), 0xc00000000);
+/// ```
+pub fn bit<T: Copy>(index: T) -> Bit<T> {
+ Bit {
+ index,
+ inverted: false,
+ }
+}
+
+impl<T: Copy> ops::Not for Bit<T> {
+ type Output = Self;
+ fn not(self) -> Self {
+ Self {
+ index: self.index,
+ inverted: !self.inverted,
+ }
+ }
+}
+
+/// Implemented by integer types that allow counting the number of trailing zeroes.
+pub trait TrailingZeros {
+ /// Returns the number of trailing zeroes in the binary representation of `self`.
+ fn trailing_zeros(&self) -> u32;
+}
+
+macro_rules! define_unsigned_number_traits {
+ ($type_name:ty) => {
+ impl TrailingZeros for $type_name {
+ fn trailing_zeros(&self) -> u32 {
+ <$type_name>::trailing_zeros(*self)
+ }
+ }
+
+ impl<T: Copy> core::convert::From<Bit<T>> for $type_name
+ where
+ Self: ops::Shl<T, Output = Self> + core::convert::From<u8> + ops::Not<Output = Self>,
+ {
+ fn from(v: Bit<T>) -> Self {
+ let c = Self::from(1u8) << v.index;
+ if v.inverted {
+ !c
+ } else {
+ c
+ }
+ }
+ }
+
+ impl<T: Copy> ops::BitAnd<Bit<T>> for $type_name
+ where
+ Self: ops::Shl<T, Output = Self> + core::convert::From<u8>,
+ {
+ type Output = Self;
+ fn bitand(self, rhs: Bit<T>) -> Self::Output {
+ self & Self::from(rhs)
+ }
+ }
+
+ impl<T: Copy> ops::BitOr<Bit<T>> for $type_name
+ where
+ Self: ops::Shl<T, Output = Self> + core::convert::From<u8>,
+ {
+ type Output = Self;
+ fn bitor(self, rhs: Bit<T>) -> Self::Output {
+ self | Self::from(rhs)
+ }
+ }
+
+ impl<T: Copy> ops::BitAndAssign<Bit<T>> for $type_name
+ where
+ Self: ops::Shl<T, Output = Self> + core::convert::From<u8>,
+ {
+ fn bitand_assign(&mut self, rhs: Bit<T>) {
+ *self &= Self::from(rhs)
+ }
+ }
+
+ impl<T: Copy> ops::BitOrAssign<Bit<T>> for $type_name
+ where
+ Self: ops::Shl<T, Output = Self> + core::convert::From<u8>,
+ {
+ fn bitor_assign(&mut self, rhs: Bit<T>) {
+ *self |= Self::from(rhs)
+ }
+ }
+ };
+}
+
+define_unsigned_number_traits!(u8);
+define_unsigned_number_traits!(u16);
+define_unsigned_number_traits!(u32);
+define_unsigned_number_traits!(u64);
+define_unsigned_number_traits!(usize);
+
+/// Returns an iterator over the set bits of `value`.
+///
+/// # Examples
+///
+/// ```
+/// use kernel::bits_iter;
+///
+/// let mut iter = bits_iter(5usize);
+/// assert_eq!(iter.next().unwrap(), 0);
+/// assert_eq!(iter.next().unwrap(), 2);
+/// assert!(iter.next().is_none());
+/// ```
+///
+/// ```
+/// use kernel::bits_iter;
+///
+/// fn print_bits(x: usize) {
+/// for bit in bits_iter(x) {
+/// pr_info!("{}\n", bit);
+/// }
+/// }
+///
+/// # print_bits(42);
+/// ```
+#[inline]
+pub fn bits_iter<T>(value: T) -> impl Iterator<Item = u32>
+where
+ T: core::cmp::PartialEq
+ + From<u8>
+ + ops::Shl<u32, Output = T>
+ + ops::Not<Output = T>
+ + ops::BitAndAssign
+ + TrailingZeros,
+{
+ struct BitIterator<U> {
+ value: U,
+ }
+
+ impl<U> Iterator for BitIterator<U>
+ where
+ U: core::cmp::PartialEq
+ + From<u8>
+ + ops::Shl<u32, Output = U>
+ + ops::Not<Output = U>
+ + ops::BitAndAssign
+ + TrailingZeros,
+ {
+ type Item = u32;
+
+ #[inline]
+ fn next(&mut self) -> Option<u32> {
+ if self.value == U::from(0u8) {
+ return None;
+ }
+ let ret = self.value.trailing_zeros();
+ self.value &= !(U::from(1u8) << ret);
+ Some(ret)
+ }
+ }
+
+ BitIterator { value }
+}
+
+/// A trait for boolean types.
+///
+/// This is meant to be used in type states to allow boolean constraints in implementation blocks.
+/// In the example below, the implementation containing `MyType::set_value` could _not_ be
+/// constrained to type states containing `Writable = true` if `Writable` were a constant instead
+/// of a type.
+///
+/// # Safety
+///
+/// No additional implementations of [`Bool`] should be provided, as [`True`] and [`False`] are
+/// already provided.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::{Bool, False, True};
+/// use core::marker::PhantomData;
+///
+/// // Type state specifies whether the type is writable.
+/// trait MyTypeState {
+/// type Writable: Bool;
+/// }
+///
+/// // In state S1, the type is writable.
+/// struct S1;
+/// impl MyTypeState for S1 {
+/// type Writable = True;
+/// }
+///
+/// // In state S2, the type is not writable.
+/// struct S2;
+/// impl MyTypeState for S2 {
+/// type Writable = False;
+/// }
+///
+/// struct MyType<T: MyTypeState> {
+/// value: u32,
+/// _p: PhantomData<T>,
+/// }
+///
+/// impl<T: MyTypeState> MyType<T> {
+/// fn new(value: u32) -> Self {
+/// Self {
+/// value,
+/// _p: PhantomData,
+/// }
+/// }
+/// }
+///
+/// // This implementation block only applies if the type state is writable.
+/// impl<T> MyType<T>
+/// where
+/// T: MyTypeState<Writable = True>,
+/// {
+/// fn set_value(&mut self, v: u32) {
+/// self.value = v;
+/// }
+/// }
+///
+/// let mut x = MyType::<S1>::new(10);
+/// let mut y = MyType::<S2>::new(20);
+///
+/// x.set_value(30);
+///
+/// // The code below fails to compile because `S2` is not writable.
+/// // y.set_value(40);
+/// ```
+pub unsafe trait Bool {}
+
+/// Represents the `true` value for types with [`Bool`] bound.
+pub struct True;
+
+// SAFETY: This is one of the only two implementations of `Bool`.
+unsafe impl Bool for True {}
+
+/// Represents the `false` value for types with [`Bool`] bound.
+pub struct False;
+
+// SAFETY: This is one of the only two implementations of `Bool`.
+unsafe impl Bool for False {}
+
+/// Types that are _always_ reference counted.
+///
+/// It allows such types to define their own custom ref increment and decrement functions.
+/// Additionally, it allows users to convert from a shared reference `&T` to an owned reference
+/// [`ARef<T>`].
+///
+/// This is usually implemented by wrappers to existing structures on the C side of the code. For
+/// Rust code, the recommendation is to use [`Ref`] to create reference-counted instances of a
+/// type.
+///
+/// # Safety
+///
+/// Implementers must ensure that increments to the reference count keep the object alive in
+/// memory at least until a matching decrement is performed.
+///
+/// Implementers must also ensure that all instances are reference-counted. (Otherwise they
+/// won't be able to honour the requirement that [`AlwaysRefCounted::inc_ref`] keep the object
+/// alive.)
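+///
+/// # Examples
+///
+/// A minimal sketch of a wrapper around a refcounted C object; `bindings::foo`, `foo_get` and
+/// `foo_put` are hypothetical stand-ins for a real C API:
+///
+/// ```ignore
+/// use core::ptr::NonNull;
+/// use kernel::{AlwaysRefCounted, Opaque};
+///
+/// #[repr(transparent)]
+/// struct Foo(Opaque<bindings::foo>);
+///
+/// // SAFETY: `foo_get` keeps the object alive at least until the matching `foo_put`, and all
+/// // instances of `Foo` are refcounted by the C side.
+/// unsafe impl AlwaysRefCounted for Foo {
+///     fn inc_ref(&self) {
+///         // SAFETY: The existence of `&self` guarantees that the refcount is non-zero.
+///         unsafe { bindings::foo_get(self.0.get()) };
+///     }
+///
+///     unsafe fn dec_ref(obj: NonNull<Self>) {
+///         // SAFETY: The caller owns an increment on the refcount.
+///         unsafe { bindings::foo_put(obj.as_ref().0.get()) };
+///     }
+/// }
+/// ```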
+pub unsafe trait AlwaysRefCounted {
+ /// Increments the reference count on the object.
+ fn inc_ref(&self);
+
+ /// Decrements the reference count on the object.
+ ///
+ /// Frees the object when the count reaches zero.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that there was a previous matching increment to the reference count,
+ /// and that the object is no longer used after its reference count is decremented (as it may
+ /// result in the object being freed), unless the caller owns another increment on the refcount
+ /// (e.g., it calls [`AlwaysRefCounted::inc_ref`] twice, then calls
+ /// [`AlwaysRefCounted::dec_ref`] once).
+ unsafe fn dec_ref(obj: NonNull<Self>);
+}
+
+/// An owned reference to an always-reference-counted object.
+///
+/// The object's reference count is automatically decremented when an instance of [`ARef`] is
+/// dropped. It is also automatically incremented when a new instance is created via
+/// [`ARef::clone`].
+///
+/// # Invariants
+///
+/// The pointer stored in `ptr` is non-null and valid for the lifetime of the [`ARef`] instance. In
+/// particular, the [`ARef`] instance owns an increment on the underlying object's reference count.
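+///
+/// # Examples
+///
+/// A minimal sketch, reusing the hypothetical `Foo` type from the [`AlwaysRefCounted`] example:
+///
+/// ```ignore
+/// fn keep_alive(foo: &Foo) -> ARef<Foo> {
+///     // `From<&T>` takes a new increment on the refcount and returns an owned reference.
+///     ARef::from(foo)
+/// }
+/// ```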
+pub struct ARef<T: AlwaysRefCounted> {
+ ptr: NonNull<T>,
+ _p: PhantomData<T>,
+}
+
+impl<T: AlwaysRefCounted> ARef<T> {
+ /// Creates a new instance of [`ARef`].
+ ///
+ /// It takes over an increment of the reference count on the underlying object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the reference count was incremented at least once, and that they
+ /// are properly relinquishing one increment. That is, if there is only one increment, callers
+ /// must not use the underlying object anymore -- it is only safe to do so via the newly
+ /// created [`ARef`].
+ pub unsafe fn from_raw(ptr: NonNull<T>) -> Self {
+ // INVARIANT: The safety requirements guarantee that the new instance now owns the
+ // increment on the refcount.
+ Self {
+ ptr,
+ _p: PhantomData,
+ }
+ }
+}
+
+impl<T: AlwaysRefCounted> Clone for ARef<T> {
+ fn clone(&self) -> Self {
+ self.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(self.ptr) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Deref for ARef<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: The type invariants guarantee that the object is valid.
+ unsafe { self.ptr.as_ref() }
+ }
+}
+
+impl<T: AlwaysRefCounted> From<&T> for ARef<T> {
+ fn from(b: &T) -> Self {
+ b.inc_ref();
+ // SAFETY: We just incremented the refcount above.
+ unsafe { Self::from_raw(NonNull::from(b)) }
+ }
+}
+
+impl<T: AlwaysRefCounted> Drop for ARef<T> {
+ fn drop(&mut self) {
+ // SAFETY: The type invariants guarantee that the `ARef` owns the reference we're about to
+ // decrement.
+ unsafe { T::dec_ref(self.ptr) };
+ }
+}
+
+/// A sum type that always holds either a value of type `L` or `R`.
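+///
+/// # Examples
+///
+/// A minimal usage sketch:
+///
+/// ```ignore
+/// # use kernel::Either;
+/// let v: Either<u32, &str> = Either::Left(7);
+/// match v {
+///     Either::Left(n) => assert_eq!(n, 7),
+///     Either::Right(_) => unreachable!(),
+/// }
+/// ```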
+pub enum Either<L, R> {
+ /// Constructs an instance of [`Either`] containing a value of type `L`.
+ Left(L),
+
+ /// Constructs an instance of [`Either`] containing a value of type `R`.
+ Right(R),
+}
diff --git a/rust/kernel/unsafe_list.rs b/rust/kernel/unsafe_list.rs
new file mode 100644
index 000000000000..df496667c033
--- /dev/null
+++ b/rust/kernel/unsafe_list.rs
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Intrusive circular doubly-linked lists.
+//!
+//! We don't use the C version for two main reasons:
+//! - Next/prev pointers do not support `?Sized` types, so we wouldn't be able to have a list of,
+//!   for example, `dyn Trait`.
+//! - It would require the list head to be pinned (in addition to the list entries).
+
+use core::{cell::UnsafeCell, iter, marker::PhantomPinned, mem::MaybeUninit, ptr::NonNull};
+
+/// An intrusive circular doubly-linked list.
+///
+/// Membership of elements of the list must be tracked by the owner of the list.
+///
+/// While elements of the list must remain pinned while in the list, the list itself does not
+/// require pinning. In other words, users are allowed to move instances of [`List`].
+///
+/// # Invariants
+///
+/// The links of an entry are wrapped in [`UnsafeCell`] and they are accessible when the list itself
+/// is. For example, when a thread has a mutable reference to a list, it may also safely get
+/// mutable references to the links of the elements in the list.
+///
+/// The links of an entry are also wrapped in [`MaybeUninit`] and they are initialised when they
+/// are present in a list. Otherwise they are uninitialised.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::unsafe_list::{Adapter, Links, List};
+///
+/// struct Example {
+/// v: usize,
+/// links: Links<Example>,
+/// }
+///
+/// // SAFETY: This adapter is the only one that uses `Example::links`.
+/// unsafe impl Adapter for Example {
+/// type EntryType = Self;
+/// fn to_links(obj: &Self) -> &Links<Self> {
+/// &obj.links
+/// }
+/// }
+///
+/// let a = Example {
+/// v: 0,
+/// links: Links::new(),
+/// };
+/// let b = Example {
+/// v: 1,
+/// links: Links::new(),
+/// };
+///
+/// let mut list = List::<Example>::new();
+/// assert!(list.is_empty());
+///
+/// // SAFETY: `a` was declared above, it's not in any lists yet, is never moved, and outlives the
+/// // list.
+/// unsafe { list.push_back(&a) };
+///
+/// // SAFETY: `b` was declared above, it's not in any lists yet, is never moved, and outlives the
+/// // list.
+/// unsafe { list.push_back(&b) };
+///
+/// assert!(core::ptr::eq(&a, list.front().unwrap().as_ptr()));
+/// assert!(core::ptr::eq(&b, list.back().unwrap().as_ptr()));
+///
+/// for (i, e) in list.iter().enumerate() {
+/// assert_eq!(i, e.v);
+/// }
+///
+/// for e in &list {
+/// pr_info!("{}", e.v);
+/// }
+///
+/// // SAFETY: `b` was added to the list above and wasn't removed yet.
+/// unsafe { list.remove(&b) };
+///
+/// assert!(core::ptr::eq(&a, list.front().unwrap().as_ptr()));
+/// assert!(core::ptr::eq(&a, list.back().unwrap().as_ptr()));
+/// ```
+pub struct List<A: Adapter + ?Sized> {
+ first: Option<NonNull<A::EntryType>>,
+}
+
+// SAFETY: The list itself can be safely sent to other threads, but we restrict it to being `Send`
+// only when its entries are also `Send`.
+unsafe impl<A: Adapter + ?Sized> Send for List<A> where A::EntryType: Send {}
+
+// SAFETY: The list is itself usable from other threads via references but we restrict it to being
+// `Sync` only when its entries are also `Sync`.
+unsafe impl<A: Adapter + ?Sized> Sync for List<A> where A::EntryType: Sync {}
+
+impl<A: Adapter + ?Sized> List<A> {
+ /// Constructs a new empty list.
+ pub const fn new() -> Self {
+ Self { first: None }
+ }
+
+ /// Determines if the list is empty.
+ pub fn is_empty(&self) -> bool {
+ self.first.is_none()
+ }
+
+ /// Inserts an entry into the list as its only element.
+ ///
+ /// This must only be called when the list is empty.
+ pub fn insert_only_entry(&mut self, obj: &A::EntryType) {
+ let obj_ptr = NonNull::from(obj);
+
+ // SAFETY: We have mutable access to the list, so we also have access to the entry
+ // we're about to insert (and our callers guarantee that it's not in any other lists).
+ let obj_inner = unsafe { &mut *A::to_links(obj).0.get() };
+
+ // INVARIANTS: All fields of the links of the newly-inserted object are initialised
+ // below.
+ obj_inner.write(LinksInner {
+ next: obj_ptr,
+ prev: obj_ptr,
+ _pin: PhantomPinned,
+ });
+ self.first = Some(obj_ptr);
+ }
+
+ /// Adds the given object to the end of the list.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that:
+ /// - The object is not currently in any lists.
+ /// - The object remains alive until it is removed from the list.
+ /// - The object is not moved until it is removed from the list.
+ pub unsafe fn push_back(&mut self, obj: &A::EntryType) {
+ if let Some(first) = self.first {
+ // SAFETY: The previous entry to the first one is necessarily present in the list (it
+ // may in fact be the first entry itself as this is a circular list). The safety
+ // requirements of this function regarding `obj` satisfy those of `insert_after`.
+ unsafe { self.insert_after(self.inner_ref(first).prev, obj) };
+ } else {
+ self.insert_only_entry(obj);
+ }
+ }
+
+ /// Adds the given object to the beginning of the list.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that:
+ /// - The object is not currently in any lists.
+ /// - The object remains alive until it is removed from the list.
+ /// - The object is not moved until it is removed from the list.
+ pub unsafe fn push_front(&mut self, obj: &A::EntryType) {
+ if let Some(first) = self.first {
+ // SAFETY: The safety requirements of this function regarding `obj` satisfy those of
+ // `insert_before`. Additionally, `first` is in the list.
+ unsafe { self.insert_before(first, obj) };
+ } else {
+ self.insert_only_entry(obj);
+ }
+ }
+
+ /// Removes the given object from the list.
+ ///
+ /// # Safety
+ ///
+ /// The object must be in the list. In other words, the object must have previously been
+ /// inserted into this list and not removed yet.
+ pub unsafe fn remove(&mut self, entry: &A::EntryType) {
+ // SAFETY: Per the function safety requirements, `entry` is in the list.
+ let inner = unsafe { self.inner_ref(NonNull::from(entry)) };
+ let next = inner.next;
+ let prev = inner.prev;
+
+ // SAFETY: We have mutable access to the list, so we also have access to the entry we're
+ // about to remove (which we know is in the list per the function safety requirements).
+ let inner = unsafe { &mut *A::to_links(entry).0.get() };
+
+ // SAFETY: Since the entry was in the list, it was initialised.
+ unsafe { inner.assume_init_drop() };
+
+ if core::ptr::eq(next.as_ptr(), entry) {
+ // Removing the only element.
+ self.first = None;
+ } else {
+ // SAFETY: `prev` is in the list because it is pointed at by the entry being removed.
+ unsafe { self.inner(prev).next = next };
+ // SAFETY: `next` is in the list because it is pointed at by the entry being removed.
+ unsafe { self.inner(next).prev = prev };
+
+ if core::ptr::eq(self.first.unwrap().as_ptr(), entry) {
+ // Update the pointer to the first element as we're removing it.
+ self.first = Some(next);
+ }
+ }
+ }
+
+ /// Adds the given object after another object already in the list.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that:
+ /// - The existing object is currently in the list.
+ /// - The new object is not currently in any lists.
+ /// - The new object remains alive until it is removed from the list.
+ /// - The new object is not moved until it is removed from the list.
+ pub unsafe fn insert_after(&mut self, existing: NonNull<A::EntryType>, new: &A::EntryType) {
+ // SAFETY: We have mutable access to the list, so we also have access to the entry we're
+ // about to insert (and it's not in any other lists per the function safety requirements).
+ let new_inner = unsafe { &mut *A::to_links(new).0.get() };
+
+ // SAFETY: Per the function safety requirements, `existing` is in the list.
+ let existing_inner = unsafe { self.inner(existing) };
+ let next = existing_inner.next;
+
+ // INVARIANTS: All fields of the links of the newly-inserted object are initialised below.
+ new_inner.write(LinksInner {
+ next,
+ prev: existing,
+ _pin: PhantomPinned,
+ });
+
+ existing_inner.next = NonNull::from(new);
+
+ // SAFETY: `next` is in the list because it's pointed at by the existing entry.
+ unsafe { self.inner(next).prev = NonNull::from(new) };
+ }
+
+ /// Adds the given object before another object already in the list.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that:
+ /// - The existing object is currently in the list.
+ /// - The new object is not currently in any lists.
+ /// - The new object remains alive until it is removed from the list.
+ /// - The new object is not moved until it is removed from the list.
+ pub unsafe fn insert_before(&mut self, existing: NonNull<A::EntryType>, new: &A::EntryType) {
+ // SAFETY: The safety requirements of this function satisfy those of `insert_after`.
+ unsafe { self.insert_after(self.inner_ref(existing).prev, new) };
+
+ if self.first.unwrap() == existing {
+ // Update the pointer to the first element as we're inserting before it.
+ self.first = Some(NonNull::from(new));
+ }
+ }
+
+ /// Returns the first element of the list, if one exists.
+ pub fn front(&self) -> Option<NonNull<A::EntryType>> {
+ self.first
+ }
+
+ /// Returns the last element of the list, if one exists.
+ pub fn back(&self) -> Option<NonNull<A::EntryType>> {
+ // SAFETY: Having a pointer to it guarantees that the object is in the list.
+ self.first.map(|f| unsafe { self.inner_ref(f).prev })
+ }
+
+ /// Returns an iterator for the list starting at the first entry.
+ pub fn iter(&self) -> Iterator<'_, A> {
+ Iterator::new(self.cursor_front())
+ }
+
+ /// Returns an iterator for the list starting at the last entry.
+ pub fn iter_back(&self) -> impl iter::DoubleEndedIterator<Item = &'_ A::EntryType> {
+ Iterator::new(self.cursor_back())
+ }
+
+ /// Returns a cursor starting on the first (front) element of the list.
+ pub fn cursor_front(&self) -> Cursor<'_, A> {
+ // SAFETY: `front` is in the list (or is `None`) because we've read it from the list head
+ // and the list cannot have changed because we hold a shared reference to it.
+ unsafe { Cursor::new(self, self.front()) }
+ }
+
+ /// Returns a cursor starting on the last (back) element of the list.
+ pub fn cursor_back(&self) -> Cursor<'_, A> {
+ // SAFETY: `back` is in the list (or is `None`) because we've read it from the list head
+ // and the list cannot have changed because we hold a shared reference to it.
+ unsafe { Cursor::new(self, self.back()) }
+ }
+
+ /// Returns a mutable reference to the links of a given object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the element is in the list.
+ unsafe fn inner(&mut self, ptr: NonNull<A::EntryType>) -> &mut LinksInner<A::EntryType> {
+ // SAFETY: The safety requirements guarantee that the links are initialised because
+ // that's part of the type invariants. Additionally, the type invariants also guarantee
+ // that having a mutable reference to the list guarantees that the links are mutably
+ // accessible as well.
+ unsafe { (*A::to_links(ptr.as_ref()).0.get()).assume_init_mut() }
+ }
+
+ /// Returns a shared reference to the links of a given object.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the element is in the list.
+ unsafe fn inner_ref(&self, ptr: NonNull<A::EntryType>) -> &LinksInner<A::EntryType> {
+ // SAFETY: The safety requirements guarantee that the links are initialised because
+ // that's part of the type invariants. Additionally, the type invariants also guarantee
+ // that having a shared reference to the list guarantees that the links are accessible in
+ // shared mode as well.
+ unsafe { (*A::to_links(ptr.as_ref()).0.get()).assume_init_ref() }
+ }
+}
+
+impl<'a, A: Adapter + ?Sized> iter::IntoIterator for &'a List<A> {
+ type Item = &'a A::EntryType;
+ type IntoIter = Iterator<'a, A>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+/// An iterator for the linked list.
+pub struct Iterator<'a, A: Adapter + ?Sized> {
+ cursor: Cursor<'a, A>,
+}
+
+impl<'a, A: Adapter + ?Sized> Iterator<'a, A> {
+ fn new(cursor: Cursor<'a, A>) -> Self {
+ Self { cursor }
+ }
+}
+
+impl<'a, A: Adapter + ?Sized> iter::Iterator for Iterator<'a, A> {
+ type Item = &'a A::EntryType;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let ret = self.cursor.current()?;
+ self.cursor.move_next();
+ Some(ret)
+ }
+}
+
+impl<A: Adapter + ?Sized> iter::DoubleEndedIterator for Iterator<'_, A> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ let ret = self.cursor.current()?;
+ self.cursor.move_prev();
+ Some(ret)
+ }
+}
+
+/// A linked-list adapter.
+///
+/// It is a separate type (as opposed to implemented by the type of the elements of the list)
+/// so that a given type can be inserted into multiple lists at the same time; in such cases, each
+/// list needs its own adapter that returns a different pointer to links.
+///
+/// The adapter may, however, be implemented by the element type itself, which makes usage more
+/// readable.
+///
+/// # Safety
+///
+/// Implementers must ensure that the links returned by [`Adapter::to_links`] are unique to the
+/// adapter. That is, different adapters must return different links for a given object.
+///
+/// The reason for this requirement is to avoid confusion that may lead to UB. In particular, if
+/// two adapters were to use the same links, a user may have two lists (one for each adapter) and
+/// try to insert the same object into both at the same time; although this clearly violates the
+/// list safety requirements (e.g., those in [`List::push_back`]), for users to notice it, they'd
+/// have to dig into the details of the two adapters.
+///
+/// By imposing the requirement on the adapter, we make it easier for users to check compliance
+/// with the requirements when using the list.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::unsafe_list::{Adapter, Links, List};
+///
+/// struct Example {
+/// a: u32,
+/// b: u32,
+/// links1: Links<Example>,
+/// links2: Links<Example>,
+/// }
+///
+/// // SAFETY: This adapter is the only one that uses `Example::links1`.
+/// unsafe impl Adapter for Example {
+/// type EntryType = Self;
+/// fn to_links(obj: &Self) -> &Links<Self> {
+/// &obj.links1
+/// }
+/// }
+///
+/// struct ExampleAdapter;
+///
+/// // SAFETY: This adapter is the only one that uses `Example::links2`.
+/// unsafe impl Adapter for ExampleAdapter {
+/// type EntryType = Example;
+/// fn to_links(obj: &Example) -> &Links<Example> {
+/// &obj.links2
+/// }
+/// }
+///
+/// static LIST1: List<Example> = List::new();
+/// static LIST2: List<ExampleAdapter> = List::new();
+/// ```
+pub unsafe trait Adapter {
+ /// The type of the entries in the list.
+ type EntryType: ?Sized;
+
+ /// Retrieves the linked list links for the given object.
+ fn to_links(obj: &Self::EntryType) -> &Links<Self::EntryType>;
+}
+
+struct LinksInner<T: ?Sized> {
+ next: NonNull<T>,
+ prev: NonNull<T>,
+ _pin: PhantomPinned,
+}
+
+/// Links of a linked list.
+///
+/// List entries need one of these per concurrent list.
+pub struct Links<T: ?Sized>(UnsafeCell<MaybeUninit<LinksInner<T>>>);
+
+// SAFETY: `Links` can be safely sent to other threads but we restrict it to being `Send` only when
+// the list entries it points to are also `Send`.
+unsafe impl<T: ?Sized> Send for Links<T> {}
+
+// SAFETY: `Links` is usable from other threads via references but we restrict it to being `Sync`
+// only when the list entries it points to are also `Sync`.
+unsafe impl<T: ?Sized> Sync for Links<T> {}
+
+impl<T: ?Sized> Links<T> {
+ /// Constructs a new instance of the linked-list links.
+ pub const fn new() -> Self {
+ Self(UnsafeCell::new(MaybeUninit::uninit()))
+ }
+}
+
+pub(crate) struct CommonCursor<A: Adapter + ?Sized> {
+ pub(crate) cur: Option<NonNull<A::EntryType>>,
+}
+
+impl<A: Adapter + ?Sized> CommonCursor<A> {
+ pub(crate) fn new(cur: Option<NonNull<A::EntryType>>) -> Self {
+ Self { cur }
+ }
+
+ /// Moves the cursor to the next entry of the list.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the cursor is either [`None`] or points to an entry that is in
+ /// `list`.
+ pub(crate) unsafe fn move_next(&mut self, list: &List<A>) {
+ match self.cur.take() {
+ None => self.cur = list.first,
+ Some(cur) => {
+ if let Some(head) = list.first {
+ // SAFETY: Per the function safety requirements, `cur` is in the list.
+ let links = unsafe { list.inner_ref(cur) };
+ if links.next != head {
+ self.cur = Some(links.next);
+ }
+ }
+ }
+ }
+ }
+
+ /// Moves the cursor to the previous entry of the list.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that the cursor is either [`None`] or points to an entry that is in
+ /// `list`.
+ pub(crate) unsafe fn move_prev(&mut self, list: &List<A>) {
+ match list.first {
+ None => self.cur = None,
+ Some(head) => {
+ let next = match self.cur.take() {
+ None => head,
+ Some(cur) => {
+ if cur == head {
+ return;
+ }
+ cur
+ }
+ };
+ // SAFETY: `next` is either `head` or `cur`. The former is in the list because it's
+ // its head; the latter is in the list per the function safety requirements.
+ self.cur = Some(unsafe { list.inner_ref(next) }.prev);
+ }
+ }
+ }
+}
+
+/// A list cursor that allows traversing a linked list and inspecting elements.
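+///
+/// # Examples
+///
+/// A minimal traversal sketch, assuming the `Example` adapter and `list` from the [`List`]
+/// documentation above:
+///
+/// ```ignore
+/// let mut cursor = list.cursor_front();
+/// while let Some(e) = cursor.current() {
+///     pr_info!("{}\n", e.v);
+///     cursor.move_next();
+/// }
+/// ```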
+pub struct Cursor<'a, A: Adapter + ?Sized> {
+ cursor: CommonCursor<A>,
+ list: &'a List<A>,
+}
+
+impl<'a, A: Adapter + ?Sized> Cursor<'a, A> {
+ /// Creates a new cursor.
+ ///
+ /// # Safety
+ ///
+ /// Callers must ensure that `cur` is either [`None`] or points to an entry in `list`.
+ pub(crate) unsafe fn new(list: &'a List<A>, cur: Option<NonNull<A::EntryType>>) -> Self {
+ Self {
+ list,
+ cursor: CommonCursor::new(cur),
+ }
+ }
+
+ /// Returns the element the cursor is currently positioned on.
+ pub fn current(&self) -> Option<&'a A::EntryType> {
+ let cur = self.cursor.cur?;
+ // SAFETY: `cursor` starts off in the list and only changes within the list. Additionally,
+ // the list cannot change because we hold a shared reference to it, so the cursor is always
+ // within the list.
+ Some(unsafe { cur.as_ref() })
+ }
+
+ /// Moves the cursor to the next element.
+ pub fn move_next(&mut self) {
+ // SAFETY: `cursor` starts off in the list and only changes within the list. Additionally,
+ // the list cannot change because we hold a shared reference to it, so the cursor is always
+ // within the list.
+ unsafe { self.cursor.move_next(self.list) };
+ }
+
+ /// Moves the cursor to the previous element.
+ pub fn move_prev(&mut self) {
+ // SAFETY: `cursor` starts off in the list and only changes within the list. Additionally,
+ // the list cannot change because we hold a shared reference to it, so the cursor is always
+ // within the list.
+ unsafe { self.cursor.move_prev(self.list) };
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use alloc::{boxed::Box, vec::Vec};
+ use core::ptr::NonNull;
+
+ struct Example {
+ links: super::Links<Self>,
+ }
+
+ // SAFETY: This is the only adapter that uses `Example::links`.
+ unsafe impl super::Adapter for Example {
+ type EntryType = Self;
+ fn to_links(obj: &Self) -> &super::Links<Self> {
+ &obj.links
+ }
+ }
+
+ fn build_vector(size: usize) -> Vec<Box<Example>> {
+ let mut v = Vec::with_capacity(size);
+ for _ in 0..size {
+ v.push(Box::new(Example {
+ links: super::Links::new(),
+ }));
+ }
+ v
+ }
+
+ #[track_caller]
+ fn assert_list_contents(v: &[Box<Example>], list: &super::List<Example>) {
+ let n = v.len();
+
+ // Assert that the list is ok going forward.
+ let mut count = 0;
+ for (i, e) in list.iter().enumerate() {
+ assert!(core::ptr::eq(e, &*v[i]));
+ count += 1;
+ }
+ assert_eq!(count, n);
+
+ // Assert that the list is ok going backwards.
+ let mut count = 0;
+ for (i, e) in list.iter_back().rev().enumerate() {
+ assert!(core::ptr::eq(e, &*v[n - 1 - i]));
+ count += 1;
+ }
+ assert_eq!(count, n);
+ }
+
+ #[track_caller]
+ fn test_each_element(
+ min_len: usize,
+ max_len: usize,
+ test: impl Fn(&mut Vec<Box<Example>>, &mut super::List<Example>, usize, Box<Example>),
+ ) {
+ for n in min_len..=max_len {
+ for i in 0..n {
+ let extra = Box::new(Example {
+ links: super::Links::new(),
+ });
+ let mut v = build_vector(n);
+ let mut list = super::List::<Example>::new();
+
+ // Build list.
+ for j in 0..n {
+ // SAFETY: The entry was allocated above, it's not in any lists yet, is never
+ // moved, and outlives the list.
+ unsafe { list.push_back(&v[j]) };
+ }
+
+ // Call the test case.
+ test(&mut v, &mut list, i, extra);
+
+ // Check that the list is ok.
+ assert_list_contents(&v, &list);
+ }
+ }
+ }
+
+ #[test]
+ fn test_push_back() {
+ const MAX: usize = 10;
+ let v = build_vector(MAX);
+ let mut list = super::List::<Example>::new();
+
+ for n in 1..=MAX {
+ // SAFETY: The entry was allocated above, it's not in any lists yet, is never moved,
+ // and outlives the list.
+ unsafe { list.push_back(&v[n - 1]) };
+ assert_list_contents(&v[..n], &list);
+ }
+ }
+
+ #[test]
+ fn test_push_front() {
+ const MAX: usize = 10;
+ let v = build_vector(MAX);
+ let mut list = super::List::<Example>::new();
+
+ for n in 1..=MAX {
+ // SAFETY: The entry was allocated above, it's not in any lists yet, is never moved,
+ // and outlives the list.
+ unsafe { list.push_front(&v[MAX - n]) };
+ assert_list_contents(&v[MAX - n..], &list);
+ }
+ }
+
+ #[test]
+ fn test_one_removal() {
+ test_each_element(1, 10, |v, list, i, _| {
+ // Remove the i-th element.
+ // SAFETY: The i-th element was added to the list above, and wasn't removed yet.
+ unsafe { list.remove(&v[i]) };
+ v.remove(i);
+ });
+ }
+
+ #[test]
+ fn test_one_insert_after() {
+ test_each_element(1, 10, |v, list, i, extra| {
+ // Insert after the i-th element.
+ // SAFETY: The i-th element was added to the list above, and wasn't removed yet.
+ // Additionally, the new element isn't in any list yet, isn't moved, and outlives
+ // the list.
+ unsafe { list.insert_after(NonNull::from(&*v[i]), &*extra) };
+ v.insert(i + 1, extra);
+ });
+ }
+
+ #[test]
+ fn test_one_insert_before() {
+ test_each_element(1, 10, |v, list, i, extra| {
+ // Insert before the i-th element.
+ // SAFETY: The i-th element was added to the list above, and wasn't removed yet.
+ // Additionally, the new element isn't in any list yet, isn't moved, and outlives
+ // the list.
+ unsafe { list.insert_before(NonNull::from(&*v[i]), &*extra) };
+ v.insert(i, extra);
+ });
+ }
+}
diff --git a/rust/kernel/user_ptr.rs b/rust/kernel/user_ptr.rs
new file mode 100644
index 000000000000..0bc2c7f1331e
--- /dev/null
+++ b/rust/kernel/user_ptr.rs
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! User pointers.
+//!
+//! C header: [`include/linux/uaccess.h`](../../../../include/linux/uaccess.h)
+
+use crate::{
+ bindings,
+ error::code::*,
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ Result,
+};
+use alloc::vec::Vec;
+
+/// A reference to an area in userspace memory, which can be either
+/// read-only or read-write.
+///
+/// All methods on this struct are safe: invalid pointers return
+/// `EFAULT`. Concurrent access, *including data races to/from userspace
+/// memory*, is permitted, because fundamentally another userspace
+/// thread/process could always be modifying memory at the same time
+/// (in the same way that userspace Rust's [`std::io`] permits data races
+/// with the contents of files on disk). In the presence of a race, the
+/// exact byte values read/written are unspecified but the operation is
+/// well-defined. Kernelspace code should validate its copy of data
+/// after completing a read, and not expect that multiple reads of the
+/// same address will return the same value.
+///
+/// All APIs enforce the invariant that a given byte of memory from userspace
+/// may only be read once. By preventing double-fetches we avoid TOCTOU
+/// vulnerabilities. This is accomplished by taking `self` by value, which prevents
+/// obtaining multiple readers for a given [`UserSlicePtr`], and by having readers
+/// permit only forward reads.
+///
+/// Constructing a [`UserSlicePtr`] performs no checks on the provided
+/// address and length; it can safely be constructed inside a kernel thread
+/// with no current userspace process. Reads and writes wrap the kernel APIs
+/// `copy_from_user` and `copy_to_user`, which check the memory map of the
+/// current process and enforce that the address range is within the user
+/// range (no additional calls to `access_ok` are needed).
+///
+/// [`std::io`]: https://doc.rust-lang.org/std/io/index.html
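+///
+/// # Examples
+///
+/// A minimal sketch of reading a buffer handed in from userspace; the pointer and length are
+/// assumed to come from an `ioctl`-style argument:
+///
+/// ```ignore
+/// use kernel::user_ptr::UserSlicePtr;
+///
+/// fn handle(ptr: *mut core::ffi::c_void, len: usize) -> Result {
+///     // SAFETY: This is the only `UserSlicePtr` created for this buffer, so every byte is
+///     // read at most once.
+///     let data = unsafe { UserSlicePtr::new(ptr, len) }.read_all()?;
+///     pr_info!("read {} bytes from userspace\n", data.len());
+///     Ok(())
+/// }
+/// ```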
+pub struct UserSlicePtr(*mut core::ffi::c_void, usize);
+
+impl UserSlicePtr {
+ /// Constructs a user slice from a raw pointer and a length in bytes.
+ ///
+ /// # Safety
+ ///
+ /// Callers must be careful to avoid time-of-check-time-of-use
+ /// (TOCTOU) issues. The simplest way is to create a single instance of
+ /// [`UserSlicePtr`] per user memory block as it reads each byte at
+ /// most once.
+ pub unsafe fn new(ptr: *mut core::ffi::c_void, length: usize) -> Self {
+ UserSlicePtr(ptr, length)
+ }
+
+ /// Reads the entirety of the user slice.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to
+ /// mapped, readable memory.
+ pub fn read_all(self) -> Result<Vec<u8>> {
+ self.reader().read_all()
+ }
+
+ /// Constructs a [`UserSlicePtrReader`].
+ pub fn reader(self) -> UserSlicePtrReader {
+ UserSlicePtrReader(self.0, self.1)
+ }
+
+ /// Writes the provided slice into the user slice.
+ ///
+ /// Returns `EFAULT` if the address does not currently point to
+ /// mapped, writable memory (in which case some data from before the
+ /// fault may be written), or `data` is larger than the user slice
+ /// (in which case no data is written).
+ pub fn write_all(self, data: &[u8]) -> Result {
+ self.writer().write_slice(data)
+ }
+
+ /// Constructs a [`UserSlicePtrWriter`].
+ pub fn writer(self) -> UserSlicePtrWriter {
+ UserSlicePtrWriter(self.0, self.1)
+ }
+
+ /// Constructs both a [`UserSlicePtrReader`] and a [`UserSlicePtrWriter`].
+ pub fn reader_writer(self) -> (UserSlicePtrReader, UserSlicePtrWriter) {
+ (
+ UserSlicePtrReader(self.0, self.1),
+ UserSlicePtrWriter(self.0, self.1),
+ )
+ }
+}
+
+/// A reader for [`UserSlicePtr`].
+///
+/// Used to incrementally read from the user slice.
+pub struct UserSlicePtrReader(*mut core::ffi::c_void, usize);
+
+impl IoBufferReader for UserSlicePtrReader {
+ /// Returns the number of bytes left to be read from this.
+ ///
+ /// Note that even reading less than this number of bytes may fail.
+ fn len(&self) -> usize {
+ self.1
+ }
+
+ /// Reads raw data from the user slice into a raw kernel buffer.
+ ///
+ /// # Safety
+ ///
+ /// The output buffer must be valid.
+ unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result {
+ if len > self.1 || len > u32::MAX as usize {
+ return Err(EFAULT);
+ }
+ let res = unsafe { bindings::copy_from_user(out as _, self.0, len as _) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ // Since this is not a pointer to a valid object in our program,
+ // we cannot use `add`, which has C-style rules for defined
+ // behavior.
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ Ok(())
+ }
+}
+
+/// A writer for [`UserSlicePtr`].
+///
+/// Used to incrementally write into the user slice.
+pub struct UserSlicePtrWriter(*mut core::ffi::c_void, usize);
+
+impl IoBufferWriter for UserSlicePtrWriter {
+ fn len(&self) -> usize {
+ self.1
+ }
+
+ fn clear(&mut self, mut len: usize) -> Result {
+ let mut ret = Ok(());
+ if len > self.1 {
+ ret = Err(EFAULT);
+ len = self.1;
+ }
+
+ // SAFETY: The buffer will be validated by `clear_user`. We ensure that `len` is within
+ // bounds in the check above.
+ let left = unsafe { bindings::clear_user(self.0, len as _) } as usize;
+ if left != 0 {
+ ret = Err(EFAULT);
+ len -= left;
+ }
+
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ ret
+ }
+
+ unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result {
+ if len > self.1 || len > u32::MAX as usize {
+ return Err(EFAULT);
+ }
+ let res = unsafe { bindings::copy_to_user(self.0, data as _, len as _) };
+ if res != 0 {
+ return Err(EFAULT);
+ }
+ // Since this is not a pointer to a valid object in our program,
+ // we cannot use `add`, which has C-style rules for defined
+ // behavior.
+ self.0 = self.0.wrapping_add(len);
+ self.1 -= len;
+ Ok(())
+ }
+}
diff --git a/rust/kernel/workqueue.rs b/rust/kernel/workqueue.rs
new file mode 100644
index 000000000000..c6b89f137469
--- /dev/null
+++ b/rust/kernel/workqueue.rs
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Work queues.
+//!
+//! C header: [`include/linux/workqueue.h`](../../../../include/linux/workqueue.h)
+
+use crate::{
+ bindings, c_str,
+ error::code::*,
+ sync::{LockClassKey, Ref, UniqueRef},
+ Opaque, Result,
+};
+use core::{fmt, ops::Deref, ptr::NonNull};
+
+/// Spawns a new work item to run in the work queue.
+///
+/// It also automatically defines a new lockdep lock class for the work item.
+#[macro_export]
+macro_rules! spawn_work_item {
+ ($queue:expr, $func:expr) => {{
+ static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ $crate::workqueue::Queue::try_spawn($queue, &CLASS, $func)
+ }};
+}
+
+/// Implements the [`WorkAdapter`] trait for a type where its [`Work`] instance is a field.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::workqueue::Work;
+///
+/// struct Example {
+/// work: Work,
+/// }
+///
+/// kernel::impl_self_work_adapter!(Example, work, |_| {});
+/// ```
+#[macro_export]
+macro_rules! impl_self_work_adapter {
+ ($work_type:ty, $field:ident, $closure:expr) => {
+ $crate::impl_work_adapter!($work_type, $work_type, $field, $closure);
+ };
+}
+
+/// Implements the [`WorkAdapter`] trait for an adapter type.
+///
+/// # Examples
+///
+/// ```
+/// # use kernel::workqueue::Work;
+///
+/// struct Example {
+/// work: Work,
+/// }
+///
+/// struct Adapter;
+///
+/// kernel::impl_work_adapter!(Adapter, Example, work, |_| {});
+/// ```
+#[macro_export]
+macro_rules! impl_work_adapter {
+ ($adapter:ty, $work_type:ty, $field:ident, $closure:expr) => {
+ // SAFETY: We use `offset_of` to ensure that the field is within the given type, and we
+ // also check its type is `Work`.
+ unsafe impl $crate::workqueue::WorkAdapter for $adapter {
+ type Target = $work_type;
+ const FIELD_OFFSET: isize = $crate::offset_of!(Self::Target, $field);
+ fn run(w: $crate::sync::Ref<Self::Target>) {
+ let closure: fn($crate::sync::Ref<Self::Target>) = $closure;
+ closure(w);
+ return;
+
+ // Checks that the type of the field is actually `Work`.
+ let tmp = core::mem::MaybeUninit::<$work_type>::uninit();
+ // SAFETY: The pointer is valid and aligned, just not initialised; `addr_of`
+ // ensures that we don't actually read from it (which would be UB) nor create an
+ // intermediate reference.
+ let _x: *const $crate::workqueue::Work =
+ unsafe { core::ptr::addr_of!((*tmp.as_ptr()).$field) };
+ }
+ }
+ };
+}
+
+/// Initialises a work item.
+///
+/// It automatically defines a new lockdep lock class for the work item.
+#[macro_export]
+macro_rules! init_work_item {
+ ($work_container:expr) => {{
+ static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ $crate::workqueue::Work::init($work_container, &CLASS)
+ }};
+}
+
+/// Initialises a work item with the given adapter.
+///
+/// It automatically defines a new lockdep lock class for the work item.
+#[macro_export]
+macro_rules! init_work_item_adapter {
+ ($adapter:ty, $work_container:expr) => {{
+ static CLASS: $crate::sync::LockClassKey = $crate::sync::LockClassKey::new();
+ $crate::workqueue::Work::init_with_adapter::<$adapter>($work_container, &CLASS)
+ }};
+}
+
+/// A kernel work queue.
+///
+/// Wraps the kernel's C `struct workqueue_struct`.
+///
+/// It allows work items to be queued to run on thread pools managed by the kernel. Several are
+/// always available, for example, the ones returned by [`system`], [`system_highpri`],
+/// [`system_long`], etc.
+///
+/// # Examples
+///
+/// The following example is the simplest way to launch a work item:
+///
+/// ```
+/// # use kernel::{spawn_work_item, workqueue};
+///
+/// # fn example() -> Result {
+/// spawn_work_item!(workqueue::system(), || pr_info!("Hello from a work item\n"))?;
+/// # Ok(())
+/// # }
+///
+/// # example().unwrap()
+/// ```
+///
+/// The following example is used to create a work item and enqueue it several times. We note that
+/// enqueuing while the work item is already queued is a no-op, so we enqueue it when it is not
+/// enqueued yet.
+///
+/// ```
+/// # use kernel::workqueue::{self, Work};
+/// use core::sync::atomic::{AtomicU32, Ordering};
+/// use kernel::sync::UniqueRef;
+///
+/// struct Example {
+/// count: AtomicU32,
+/// work: Work,
+/// }
+///
+/// kernel::impl_self_work_adapter!(Example, work, |w| {
+/// let count = w.count.fetch_add(1, Ordering::Relaxed);
+/// pr_info!("Called with count={}\n", count);
+///
+/// // Queue again if the count is less than 10.
+/// if count < 10 {
+/// workqueue::system().enqueue(w);
+/// }
+/// });
+///
+/// # fn example() -> Result {
+/// let e = UniqueRef::try_new(Example {
+/// count: AtomicU32::new(0),
+/// // SAFETY: `work` is initialised below.
+/// work: unsafe { Work::new() },
+/// })?;
+///
+/// kernel::init_work_item!(&e);
+///
+/// // Queue the first time.
+/// workqueue::system().enqueue(e.into());
+/// # Ok(())
+/// # }
+///
+/// # example().unwrap()
+/// ```
+///
+/// The following example has two different work items in the same struct, which allows it to be
+/// queued twice.
+///
+/// ```
+/// # use kernel::workqueue::{self, Work, WorkAdapter};
+/// use core::sync::atomic::{AtomicU32, Ordering};
+/// use kernel::sync::{Ref, UniqueRef};
+///
+/// struct Example {
+/// work1: Work,
+/// work2: Work,
+/// }
+///
+/// kernel::impl_self_work_adapter!(Example, work1, |_| pr_info!("First work\n"));
+///
+/// struct SecondAdapter;
+/// kernel::impl_work_adapter!(SecondAdapter, Example, work2, |_| pr_info!("Second work\n"));
+///
+/// # fn example() -> Result {
+/// let e = UniqueRef::try_new(Example {
+/// // SAFETY: `work1` is initialised below.
+/// work1: unsafe { Work::new() },
+/// // SAFETY: `work2` is initialised below.
+/// work2: unsafe { Work::new() },
+/// })?;
+///
+/// kernel::init_work_item!(&e);
+/// kernel::init_work_item_adapter!(SecondAdapter, &e);
+///
+/// let e = Ref::from(e);
+///
+/// // Enqueue the two different work items.
+/// workqueue::system().enqueue(e.clone());
+/// workqueue::system().enqueue_adapter::<SecondAdapter>(e);
+/// # Ok(())
+/// # }
+///
+/// # example().unwrap()
+/// ```
+#[repr(transparent)]
+pub struct Queue(Opaque<bindings::workqueue_struct>);
+
+// SAFETY: Kernel workqueues are usable from any thread.
+unsafe impl Sync for Queue {}
+
+impl Queue {
+ /// Tries to allocate a new work queue.
+ ///
+ /// Callers should first consider using one of the existing ones (e.g. [`system`]) before
+ /// deciding to create a new one.
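+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of creating a dedicated queue and spawning a work item on it:
+ ///
+ /// ```ignore
+ /// let queue = kernel::workqueue::Queue::try_new(format_args!("my_queue"))?;
+ /// kernel::spawn_work_item!(&*queue, || pr_info!("hello from my_queue\n"))?;
+ /// // The underlying workqueue is destroyed when `queue` (a `BoxedQueue`) is dropped.
+ /// ```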
+ pub fn try_new(name: fmt::Arguments<'_>) -> Result<BoxedQueue> {
+ // SAFETY: We use a format string that requires an `fmt::Arguments` pointer as the first
+ // and only argument.
+ let ptr = unsafe {
+ bindings::alloc_workqueue(
+ c_str!("%pA").as_char_ptr(),
+ 0,
+ 0,
+ &name as *const _ as *const core::ffi::c_void,
+ )
+ };
+ if ptr.is_null() {
+ return Err(ENOMEM);
+ }
+
+ // SAFETY: `ptr` was just allocated and checked above, so it is non-null and valid. Plus, it
+ // isn't touched after the call below, so ownership is transferred.
+ Ok(unsafe { BoxedQueue::new(ptr) })
+ }
+
+ /// Enqueues a work item.
+ ///
+ /// Returns `true` if the work item was successfully enqueued; returns `false` if it had already
+ /// been (and continued to be) enqueued.
+ pub fn enqueue<T: WorkAdapter<Target = T>>(&self, w: Ref<T>) -> bool {
+ self.enqueue_adapter::<T>(w)
+ }
+
+ /// Enqueues a work item with an explicit adapter.
+ ///
+ /// Returns `true` if the work item was successfully enqueued; returns `false` if it had already
+ /// been (and continued to be) enqueued.
+ pub fn enqueue_adapter<A: WorkAdapter + ?Sized>(&self, w: Ref<A::Target>) -> bool {
+ let ptr = Ref::into_raw(w);
+ let field_ptr =
+ (ptr as *const u8).wrapping_offset(A::FIELD_OFFSET) as *mut bindings::work_struct;
+
+ // SAFETY: Having a shared reference to the work queue guarantees that it remains valid, while
+ // the work item remains valid because we called `into_raw` and only call `from_raw` again
+ // if the object was already queued (so a previous call already guarantees it remains
+ // alive), when the work item runs, or when the work item is canceled.
+ let ret = unsafe {
+ bindings::queue_work_on(bindings::WORK_CPU_UNBOUND as _, self.0.get(), field_ptr)
+ };
+
+ if !ret {
+ // SAFETY: `ptr` comes from a previous call to `into_raw`. Additionally, given that
+ // `queue_work_on` returned `false`, we know that no-one is going to use the result of
+ // `into_raw`, so we must drop it here to avoid a reference leak.
+ unsafe { Ref::from_raw(ptr) };
+ }
+
+ ret
+ }
+
+ /// Tries to spawn the given function or closure as a work item.
+ ///
+ /// Users are encouraged to use [`spawn_work_item`] as it automatically defines the lock class
+ /// key to be used.
+ pub fn try_spawn<T: 'static + Send + Fn()>(
+ &self,
+ key: &'static LockClassKey,
+ func: T,
+ ) -> Result {
+ let w = UniqueRef::<ClosureAdapter<T>>::try_new(ClosureAdapter {
+ // SAFETY: `work` is initialised below.
+ work: unsafe { Work::new() },
+ func,
+ })?;
+ Work::init(&w, key);
+ self.enqueue(w.into());
+ Ok(())
+ }
+}
+
+struct ClosureAdapter<T: Fn() + Send> {
+ work: Work,
+ func: T,
+}
+
+// SAFETY: `ClosureAdapter::work` is of type `Work`.
+unsafe impl<T: Fn() + Send> WorkAdapter for ClosureAdapter<T> {
+ type Target = Self;
+ const FIELD_OFFSET: isize = crate::offset_of!(Self, work);
+
+ fn run(w: Ref<Self::Target>) {
+ (w.func)();
+ }
+}
+
+/// An adapter for work items.
+///
+/// In the most common case, where a type contains a [`Work`] field and is itself the adapter, it
+/// is recommended to use the [`impl_self_work_adapter`] or [`impl_work_adapter`] macros instead
+/// of implementing [`WorkAdapter`] manually; their main advantage is that they don't require any
+/// unsafe blocks.
+///
+/// # Safety
+///
+/// Implementers must ensure that there is a [`Work`] instance `FIELD_OFFSET` bytes from the
+/// beginning of a valid `Target` type. It is normally safe to use the [`crate::offset_of`] macro
+/// for this.
+pub unsafe trait WorkAdapter {
+ /// The type that this work adapter is meant to use.
+ type Target;
+
+ /// The offset, in bytes, from the beginning of [`Self::Target`] to the instance of [`Work`].
+ const FIELD_OFFSET: isize;
+
+ /// Runs when the work item is picked up for execution after it has been enqueued to some work
+ /// queue.
+ fn run(w: Ref<Self::Target>);
+}
+
+/// A work item.
+///
+/// Wraps the kernel's C `struct work_struct`.
+///
+/// Users must add a field of this type to a structure, then implement [`WorkAdapter`] so that it
+/// can be queued for execution in a thread pool. Examples of it being used are available in the
+/// documentation for [`Queue`].
+#[repr(transparent)]
+pub struct Work(Opaque<bindings::work_struct>);
+
+impl Work {
+ /// Creates a new instance of [`Work`].
+ ///
+ /// # Safety
+ ///
+ /// Callers must call either [`Work::init`] or [`Work::init_with_adapter`] before the work item
+ /// can be used.
+ pub unsafe fn new() -> Self {
+ Self(Opaque::uninit())
+ }
+
+ /// Initialises the work item.
+ ///
+ /// Users should prefer the [`init_work_item`] macro because it automatically defines a new
+ /// lock class key.
+ pub fn init<T: WorkAdapter<Target = T>>(obj: &UniqueRef<T>, key: &'static LockClassKey) {
+ Self::init_with_adapter::<T>(obj, key)
+ }
+
+ /// Initialises the work item with the given adapter.
+ ///
+ /// Users should prefer the [`init_work_item_adapter`] macro because it automatically defines a
+ /// new lock class key.
+ pub fn init_with_adapter<A: WorkAdapter>(
+ obj: &UniqueRef<A::Target>,
+ key: &'static LockClassKey,
+ ) {
+ let ptr = &**obj as *const _ as *const u8;
+ let field_ptr = ptr.wrapping_offset(A::FIELD_OFFSET) as *mut bindings::work_struct;
+
+ // SAFETY: `work` is valid for writes -- the `UniqueRef` instance guarantees that it has
+ // been allocated and there is only one pointer to it. Additionally, `work_func` is a valid
+ // callback for the work item.
+ unsafe {
+ bindings::__INIT_WORK_WITH_KEY(field_ptr, Some(Self::work_func::<A>), false, key.get())
+ };
+ }
+
+ /// Cancels the work item.
+ ///
+ /// It is ok for this to be called when the work is not queued.
+ pub fn cancel(&self) {
+ // SAFETY: The work is valid (we have a reference to it), and the function can be called
+ // whether the work is queued or not.
+ if unsafe { bindings::cancel_work_sync(self.0.get()) } {
+ // SAFETY: When the work was queued, a call to `into_raw` was made. We just canceled
+ // the work without it having the chance to run, so we need to explicitly destroy this
+ // reference (which would have happened in `work_func` if it did run).
+ unsafe { Ref::from_raw(&*self) };
+ }
+ }
+
+ unsafe extern "C" fn work_func<A: WorkAdapter>(work: *mut bindings::work_struct) {
+ let field_ptr = work as *const _ as *const u8;
+ let ptr = field_ptr.wrapping_offset(-A::FIELD_OFFSET) as *const A::Target;
+
+ // SAFETY: This callback is only ever used by the `init_with_adapter` method, so it is
+ // always the case that the work item is embedded in a `Work` (Self) struct.
+ let w = unsafe { Ref::from_raw(ptr) };
+ A::run(w);
+ }
+}
+
+/// A boxed owned workqueue.
+///
+/// # Invariants
+///
+/// `ptr` is owned by this instance of [`BoxedQueue`], so it's always valid.
+pub struct BoxedQueue {
+ ptr: NonNull<Queue>,
+}
+
+impl BoxedQueue {
+ /// Creates a new instance of [`BoxedQueue`].
+ ///
+ /// # Safety
+ ///
+ /// `ptr` must be non-null and valid. Additionally, ownership must be handed over to the new
+ /// instance of [`BoxedQueue`].
+ unsafe fn new(ptr: *mut bindings::workqueue_struct) -> Self {
+ Self {
+ // SAFETY: The safety requirements guarantee that `ptr` is non-null.
+ ptr: unsafe { NonNull::new_unchecked(ptr.cast()) },
+ }
+ }
+}
+
+impl Deref for BoxedQueue {
+ type Target = Queue;
+
+ fn deref(&self) -> &Queue {
+ // SAFETY: The type invariants guarantee that `ptr` is always valid.
+ unsafe { self.ptr.as_ref() }
+ }
+}
+
+impl Drop for BoxedQueue {
+ fn drop(&mut self) {
+ // SAFETY: The type invariants guarantee that `ptr` is always valid.
+ unsafe { bindings::destroy_workqueue(self.ptr.as_ref().0.get()) };
+ }
+}
+
+/// Returns the system work queue (`system_wq`).
+///
+/// It is the one used by schedule\[_delayed\]_work\[_on\](). Multi-CPU multi-threaded. There are
+/// users which expect relatively short queue flush time.
+///
+/// Callers shouldn't queue work items which can run for too long.
+pub fn system() -> &'static Queue {
+ // SAFETY: `system_wq` is a C global, always available.
+ unsafe { &*bindings::system_wq.cast() }
+}
+
+/// Returns the system high-priority work queue (`system_highpri_wq`).
+///
+/// It is similar to the one returned by [`system`] but for work items which require higher
+/// scheduling priority.
+pub fn system_highpri() -> &'static Queue {
+ // SAFETY: `system_highpri_wq` is a C global, always available.
+ unsafe { &*bindings::system_highpri_wq.cast() }
+}
+
+/// Returns the system work queue for potentially long-running work items (`system_long_wq`).
+///
+/// It is similar to the one returned by [`system`] but may host long running work items. Queue
+/// flushing might take relatively long.
+pub fn system_long() -> &'static Queue {
+ // SAFETY: `system_long_wq` is a C global, always available.
+ unsafe { &*bindings::system_long_wq.cast() }
+}
+
+/// Returns the system unbound work queue (`system_unbound_wq`).
+///
+/// Workers are not bound to any specific CPU, not concurrency managed, and all queued work items
+/// are executed immediately as long as the `max_active` limit is not reached and resources are
+/// available.
+pub fn system_unbound() -> &'static Queue {
+ // SAFETY: `system_unbound_wq` is a C global, always available.
+ unsafe { &*bindings::system_unbound_wq.cast() }
+}
+
+/// Returns the system freezable work queue (`system_freezable_wq`).
+///
+/// It is equivalent to the one returned by [`system`] except that it's freezable.
+pub fn system_freezable() -> &'static Queue {
+ // SAFETY: `system_freezable_wq` is a C global, always available.
+ unsafe { &*bindings::system_freezable_wq.cast() }
+}
+
+/// Returns the system power-efficient work queue (`system_power_efficient_wq`).
+///
+/// It is inclined towards saving power and is converted to "unbound" variants if the
+/// `workqueue.power_efficient` kernel parameter is specified; otherwise, it is similar to the one
+/// returned by [`system`].
+pub fn system_power_efficient() -> &'static Queue {
+ // SAFETY: `system_power_efficient_wq` is a C global, always available.
+ unsafe { &*bindings::system_power_efficient_wq.cast() }
+}
+
+/// Returns the system freezable power-efficient work queue (`system_freezable_power_efficient_wq`).
+///
+/// It is similar to the one returned by [`system_power_efficient`] except that it is freezable.
+pub fn system_freezable_power_efficient() -> &'static Queue {
+ // SAFETY: `system_freezable_power_efficient_wq` is a C global, always available.
+ unsafe { &*bindings::system_freezable_power_efficient_wq.cast() }
+}
diff --git a/rust/macros/concat_idents.rs b/rust/macros/concat_idents.rs
new file mode 100644
index 000000000000..3b5a9dd70e8a
--- /dev/null
+++ b/rust/macros/concat_idents.rs
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{token_stream, Ident, TokenStream, TokenTree};
+
+use crate::helpers::expect_punct;
+
+fn expect_ident(it: &mut token_stream::IntoIter) -> Ident {
+ if let Some(TokenTree::Ident(ident)) = it.next() {
+ ident
+ } else {
+ panic!("Expected Ident")
+ }
+}
+
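+/// Expands `concat_idents!(foo, _bar)` to the single identifier `foo_bar`, using the span of the
+/// second identifier for the result.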
+pub(crate) fn concat_idents(ts: TokenStream) -> TokenStream {
+ let mut it = ts.into_iter();
+ let a = expect_ident(&mut it);
+ assert_eq!(expect_punct(&mut it), ',');
+ let b = expect_ident(&mut it);
+ assert!(it.next().is_none(), "only two idents can be concatenated");
+ let res = Ident::new(&(a.to_string() + &b.to_string()), b.span());
+ TokenStream::from_iter([TokenTree::Ident(res)])
+}
diff --git a/rust/macros/helpers.rs b/rust/macros/helpers.rs
new file mode 100644
index 000000000000..ad210563e5a6
--- /dev/null
+++ b/rust/macros/helpers.rs
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{token_stream, Group, TokenTree};
+
+pub(crate) fn try_ident(it: &mut token_stream::IntoIter) -> Option<String> {
+ if let Some(TokenTree::Ident(ident)) = it.next() {
+ Some(ident.to_string())
+ } else {
+ None
+ }
+}
+
+pub(crate) fn try_literal(it: &mut token_stream::IntoIter) -> Option<String> {
+ if let Some(TokenTree::Literal(literal)) = it.next() {
+ Some(literal.to_string())
+ } else {
+ None
+ }
+}
+
+pub(crate) fn try_byte_string(it: &mut token_stream::IntoIter) -> Option<String> {
+ try_literal(it).and_then(|byte_string| {
+ if byte_string.starts_with("b\"") && byte_string.ends_with('\"') {
+ Some(byte_string[2..byte_string.len() - 1].to_string())
+ } else {
+ None
+ }
+ })
+}
+
+pub(crate) fn expect_ident(it: &mut token_stream::IntoIter) -> String {
+ try_ident(it).expect("Expected Ident")
+}
+
+pub(crate) fn expect_punct(it: &mut token_stream::IntoIter) -> char {
+ if let TokenTree::Punct(punct) = it.next().expect("Reached end of token stream for Punct") {
+ punct.as_char()
+ } else {
+ panic!("Expected Punct");
+ }
+}
+
+pub(crate) fn expect_literal(it: &mut token_stream::IntoIter) -> String {
+ try_literal(it).expect("Expected Literal")
+}
+
+pub(crate) fn expect_group(it: &mut token_stream::IntoIter) -> Group {
+ if let TokenTree::Group(group) = it.next().expect("Reached end of token stream for Group") {
+ group
+ } else {
+ panic!("Expected Group");
+ }
+}
+
+pub(crate) fn expect_byte_string(it: &mut token_stream::IntoIter) -> String {
+ try_byte_string(it).expect("Expected byte string")
+}
+
+pub(crate) fn expect_end(it: &mut token_stream::IntoIter) {
+ if it.next().is_some() {
+ panic!("Expected end");
+ }
+}
+
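+/// Parses a `name: <literal>,` sequence (e.g. `default: 42,`), asserting that the identifier
+/// matches `expected_name`, and returns the literal as a string.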
+pub(crate) fn get_literal(it: &mut token_stream::IntoIter, expected_name: &str) -> String {
+ assert_eq!(expect_ident(it), expected_name);
+ assert_eq!(expect_punct(it), ':');
+ let literal = expect_literal(it);
+ assert_eq!(expect_punct(it), ',');
+ literal
+}
+
+pub(crate) fn get_byte_string(it: &mut token_stream::IntoIter, expected_name: &str) -> String {
+ assert_eq!(expect_ident(it), expected_name);
+ assert_eq!(expect_punct(it), ':');
+ let byte_string = expect_byte_string(it);
+ assert_eq!(expect_punct(it), ',');
+ byte_string
+}
diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs
new file mode 100644
index 000000000000..dfb9b3c6f0dc
--- /dev/null
+++ b/rust/macros/lib.rs
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Crate for all kernel procedural macros.
+
+mod concat_idents;
+mod helpers;
+mod module;
+mod vtable;
+
+use proc_macro::TokenStream;
+
+/// Declares a kernel module.
+///
+/// The `type` argument should be a type which implements the [`Module`]
+/// trait. Also accepts various forms of kernel metadata.
+///
+/// C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
+///
+/// [`Module`]: ../kernel/trait.Module.html
+///
+/// # Examples
+///
+/// ```ignore
+/// use kernel::prelude::*;
+///
+/// module!{
+/// type: MyModule,
+/// name: b"my_kernel_module",
+/// author: b"Rust for Linux Contributors",
+/// description: b"My very own kernel module!",
+/// license: b"GPL",
+/// params: {
+/// my_i32: i32 {
+/// default: 42,
+/// permissions: 0o000,
+/// description: b"Example of i32",
+/// },
+/// writeable_i32: i32 {
+/// default: 42,
+/// permissions: 0o644,
+/// description: b"Example of i32",
+/// },
+/// },
+/// }
+///
+/// struct MyModule;
+///
+/// impl kernel::Module for MyModule {
+/// fn init() -> Result<Self> {
+/// // If the parameter is writeable, then the kparam lock must be
+/// // taken to read the parameter:
+/// {
+/// let lock = THIS_MODULE.kernel_param_lock();
+/// pr_info!("i32 param is: {}\n", writeable_i32.read(&lock));
+/// }
+/// // If the parameter is read only, it can be read without locking
+/// // the kernel parameters:
+/// pr_info!("i32 param is: {}\n", my_i32.read());
+/// Ok(Self)
+/// }
+/// }
+/// ```
+///
+/// # Supported argument types
+/// - `type`: type which implements the [`Module`] trait (required).
+/// - `name`: byte array of the name of the kernel module (required).
+/// - `author`: byte array of the author of the kernel module.
+/// - `description`: byte array of the description of the kernel module.
+/// - `license`: byte array of the license of the kernel module (required).
+/// - `alias`: byte array of alias name of the kernel module.
+/// - `alias_rtnl_link`: byte array of the `rtnl_link_alias` of the kernel module
+/// (mutually exclusive with `alias`).
+/// - `params`: parameters for the kernel module, as described below.
+///
+/// # Supported parameter types
+///
+/// - `bool`: Corresponds to C `bool` param type.
+/// - `i8`: No equivalent C param type.
+/// - `u8`: Corresponds to C `char` param type.
+/// - `i16`: Corresponds to C `short` param type.
+/// - `u16`: Corresponds to C `ushort` param type.
+/// - `i32`: Corresponds to C `int` param type.
+/// - `u32`: Corresponds to C `uint` param type.
+/// - `i64`: No equivalent C param type.
+/// - `u64`: Corresponds to C `ullong` param type.
+/// - `isize`: No equivalent C param type.
+/// - `usize`: No equivalent C param type.
+/// - `str`: Corresponds to C `charp` param type. Reading returns a byte slice.
+/// - `ArrayParam<T,N>`: Corresponds to C parameters created using `module_param_array`.
+/// An array of `T`'s of length at **most** `N`.
+///
+/// `invbool` is unsupported: it was only ever used in a few modules.
+/// Consider using a `bool` and inverting the logic instead.
+#[proc_macro]
+pub fn module(ts: TokenStream) -> TokenStream {
+ module::module(ts)
+}
+
+/// Declares or implements a vtable trait.
+///
+/// Linux's use of pure vtables is very close to Rust traits, but they differ
+/// in how unimplemented functions are represented. In Rust, traits can provide
+/// default implementations for all non-required methods (and the default
+/// implementation could just return `Error::EINVAL`); Linux typically uses C
+/// `NULL` pointers to represent these functions.
+///
+/// This attribute is intended to close the gap. Traits can be declared and
+/// implemented with the `#[vtable]` attribute, and a `HAS_*` associated constant
+/// will be generated for each method in the trait, indicating if the implementor
+/// has overridden a method.
+///
+/// This attribute is not needed if all methods are required.
+///
+/// # Examples
+///
+/// ```ignore
+/// use kernel::prelude::*;
+///
+/// // Declares a `#[vtable]` trait
+/// #[vtable]
+/// pub trait Operations: Send + Sync + Sized {
+/// fn foo(&self) -> Result<()> {
+/// Err(EINVAL)
+/// }
+///
+/// fn bar(&self) -> Result<()> {
+/// Err(EINVAL)
+/// }
+/// }
+///
+/// struct Foo;
+///
+/// // Implements the `#[vtable]` trait
+/// #[vtable]
+/// impl Operations for Foo {
+/// fn foo(&self) -> Result<()> {
+/// # Err(EINVAL)
+/// // ...
+/// }
+/// }
+///
+/// assert_eq!(<Foo as Operations>::HAS_FOO, true);
+/// assert_eq!(<Foo as Operations>::HAS_BAR, false);
+/// ```
+#[proc_macro_attribute]
+pub fn vtable(attr: TokenStream, ts: TokenStream) -> TokenStream {
+ vtable::vtable(attr, ts)
+}
+
+/// Concatenate two identifiers.
+///
+/// This is useful in macros that need to declare or reference items with names
+/// starting with a fixed prefix and ending in a user-specified name. The resulting
+/// identifier has the span of the second argument.
+///
+/// # Examples
+///
+/// ```ignore
+/// use kernel::macros::concat_idents;
+///
+/// macro_rules! pub_no_prefix {
+/// ($prefix:ident, $($newname:ident),+) => {
+/// $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+/// };
+/// }
+///
+/// pub_no_prefix!(
+/// binder_driver_return_protocol_,
+/// BR_OK,
+/// BR_ERROR,
+/// BR_TRANSACTION,
+/// BR_REPLY,
+/// BR_DEAD_REPLY,
+/// BR_TRANSACTION_COMPLETE,
+/// BR_INCREFS,
+/// BR_ACQUIRE,
+/// BR_RELEASE,
+/// BR_DECREFS,
+/// BR_NOOP,
+/// BR_SPAWN_LOOPER,
+/// BR_DEAD_BINDER,
+/// BR_CLEAR_DEATH_NOTIFICATION_DONE,
+/// BR_FAILED_REPLY
+/// );
+///
+/// assert_eq!(BR_OK, binder_driver_return_protocol_BR_OK);
+/// ```
+#[proc_macro]
+pub fn concat_idents(ts: TokenStream) -> TokenStream {
+ concat_idents::concat_idents(ts)
+}
diff --git a/rust/macros/module.rs b/rust/macros/module.rs
new file mode 100644
index 000000000000..4e68cf63a2cc
--- /dev/null
+++ b/rust/macros/module.rs
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{token_stream, Delimiter, Group, Literal, TokenStream, TokenTree};
+use std::fmt::Write;
+
+use crate::helpers::*;
+
+#[derive(Clone, PartialEq)]
+enum ParamType {
+ Ident(String),
+ Array { vals: String, max_length: usize },
+}
+
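+// Parses the `<vals, max_length>` generic arguments of an `ArrayParam` type.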
+fn expect_array_fields(it: &mut token_stream::IntoIter) -> ParamType {
+ assert_eq!(expect_punct(it), '<');
+ let vals = expect_ident(it);
+ assert_eq!(expect_punct(it), ',');
+ let max_length_str = expect_literal(it);
+ let max_length = max_length_str
+ .parse::<usize>()
+ .expect("Expected usize length");
+ assert_eq!(expect_punct(it), '>');
+ ParamType::Array { vals, max_length }
+}
+
+fn expect_type(it: &mut token_stream::IntoIter) -> ParamType {
+ if let TokenTree::Ident(ident) = it
+ .next()
+ .expect("Reached end of token stream for param type")
+ {
+ match ident.to_string().as_ref() {
+ "ArrayParam" => expect_array_fields(it),
+ _ => ParamType::Ident(ident.to_string()),
+ }
+ } else {
+ panic!("Expected Param Type")
+ }
+}
+
+struct ModInfoBuilder<'a> {
+ module: &'a str,
+ counter: usize,
+ buffer: String,
+}
+
+impl<'a> ModInfoBuilder<'a> {
+ fn new(module: &'a str) -> Self {
+ ModInfoBuilder {
+ module,
+ counter: 0,
+ buffer: String::new(),
+ }
+ }
+
+ fn emit_base(&mut self, field: &str, content: &str, builtin: bool) {
+ let string = if builtin {
+ // Built-in modules prefix their modinfo strings by `module.`.
+ format!(
+ "{module}.{field}={content}\0",
+ module = self.module,
+ field = field,
+ content = content
+ )
+ } else {
+ // Loadable modules' modinfo strings go as-is.
+ format!("{field}={content}\0", field = field, content = content)
+ };
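+ // For example, a built-in module named `foo` with field `license` and
+ // content `GPL` yields the string `foo.license=GPL\0`, while the loadable
+ // variant yields `license=GPL\0`.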
+
+ write!(
+ &mut self.buffer,
+ "
+ {cfg}
+ #[doc(hidden)]
+ #[link_section = \".modinfo\"]
+ #[used]
+ pub static __{module}_{counter}: [u8; {length}] = *{string};
+ ",
+ cfg = if builtin {
+ "#[cfg(not(MODULE))]"
+ } else {
+ "#[cfg(MODULE)]"
+ },
+ module = self.module.to_uppercase(),
+ counter = self.counter,
+ length = string.len(),
+ string = Literal::byte_string(string.as_bytes()),
+ )
+ .unwrap();
+
+ self.counter += 1;
+ }
+
+ fn emit_only_builtin(&mut self, field: &str, content: &str) {
+ self.emit_base(field, content, true)
+ }
+
+ fn emit_only_loadable(&mut self, field: &str, content: &str) {
+ self.emit_base(field, content, false)
+ }
+
+ fn emit(&mut self, field: &str, content: &str) {
+ self.emit_only_builtin(field, content);
+ self.emit_only_loadable(field, content);
+ }
+
+ fn emit_param(&mut self, field: &str, param: &str, content: &str) {
+ let content = format!("{param}:{content}", param = param, content = content);
+ self.emit(field, &content);
+ }
+}
+
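+// Returns `true` if no write bits (`0o222`) are set in the given permissions
+// literal, e.g. `"0o444"` is read-only while `"0o644"` is not.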
+fn permissions_are_readonly(perms: &str) -> bool {
+ let (radix, digits) = if let Some(n) = perms.strip_prefix("0x") {
+ (16, n)
+ } else if let Some(n) = perms.strip_prefix("0o") {
+ (8, n)
+ } else if let Some(n) = perms.strip_prefix("0b") {
+ (2, n)
+ } else {
+ (10, perms)
+ };
+ match u32::from_str_radix(digits, radix) {
+ Ok(perms) => perms & 0o222 == 0,
+ Err(_) => false,
+ }
+}
+
+fn param_ops_path(param_type: &str) -> &'static str {
+ match param_type {
+ "bool" => "kernel::module_param::PARAM_OPS_BOOL",
+ "i8" => "kernel::module_param::PARAM_OPS_I8",
+ "u8" => "kernel::module_param::PARAM_OPS_U8",
+ "i16" => "kernel::module_param::PARAM_OPS_I16",
+ "u16" => "kernel::module_param::PARAM_OPS_U16",
+ "i32" => "kernel::module_param::PARAM_OPS_I32",
+ "u32" => "kernel::module_param::PARAM_OPS_U32",
+ "i64" => "kernel::module_param::PARAM_OPS_I64",
+ "u64" => "kernel::module_param::PARAM_OPS_U64",
+ "isize" => "kernel::module_param::PARAM_OPS_ISIZE",
+ "usize" => "kernel::module_param::PARAM_OPS_USIZE",
+ "str" => "kernel::module_param::PARAM_OPS_STR",
+ t => panic!("Unrecognized type {}", t),
+ }
+}
+
+fn try_simple_param_val(
+ param_type: &str,
+) -> Box<dyn Fn(&mut token_stream::IntoIter) -> Option<String>> {
+ match param_type {
+ "bool" => Box::new(try_ident),
+ "str" => Box::new(|param_it| {
+ try_byte_string(param_it)
+ .map(|s| format!("kernel::module_param::StringParam::Ref(b\"{}\")", s))
+ }),
+ _ => Box::new(try_literal),
+ }
+}
+
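+// Parses the `default: <value>,` field of a parameter, accepting either a
+// single value or a bracketed list of values for `ArrayParam` defaults.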
+fn get_default(param_type: &ParamType, param_it: &mut token_stream::IntoIter) -> String {
+ let try_param_val = match param_type {
+ ParamType::Ident(ref param_type)
+ | ParamType::Array {
+ vals: ref param_type,
+ max_length: _,
+ } => try_simple_param_val(param_type),
+ };
+ assert_eq!(expect_ident(param_it), "default");
+ assert_eq!(expect_punct(param_it), ':');
+ let default = match param_type {
+ ParamType::Ident(_) => try_param_val(param_it).expect("Expected default param value"),
+ ParamType::Array {
+ vals: _,
+ max_length: _,
+ } => {
+ let group = expect_group(param_it);
+ assert_eq!(group.delimiter(), Delimiter::Bracket);
+ let mut default_vals = Vec::new();
+ let mut it = group.stream().into_iter();
+
+ while let Some(default_val) = try_param_val(&mut it) {
+ default_vals.push(default_val);
+ match it.next() {
+ Some(TokenTree::Punct(punct)) => assert_eq!(punct.as_char(), ','),
+ None => break,
+ _ => panic!("Expected ',' or end of array default values"),
+ }
+ }
+
+ let mut default_array = "kernel::module_param::ArrayParam::create(&[".to_string();
+ default_array.push_str(
+ &default_vals
+ .iter()
+ .map(|val| val.to_string())
+ .collect::<Vec<String>>()
+ .join(","),
+ );
+ default_array.push_str("])");
+ default_array
+ }
+ };
+ assert_eq!(expect_punct(param_it), ',');
+ default
+}
+
+fn generated_array_ops_name(vals: &str, max_length: usize) -> String {
+ format!(
+ "__generated_array_ops_{vals}_{max_length}",
+ vals = vals,
+ max_length = max_length
+ )
+}
+
+#[derive(Debug, Default)]
+struct ModuleInfo {
+ type_: String,
+ license: String,
+ name: String,
+ author: Option<String>,
+ description: Option<String>,
+ alias: Option<String>,
+ params: Option<Group>,
+}
+
+impl ModuleInfo {
+ fn parse(it: &mut token_stream::IntoIter) -> Self {
+ let mut info = ModuleInfo::default();
+
+ const EXPECTED_KEYS: &[&str] = &[
+ "type",
+ "name",
+ "author",
+ "description",
+ "license",
+ "alias",
+ "alias_rtnl_link",
+ "params",
+ ];
+ const REQUIRED_KEYS: &[&str] = &["type", "name", "license"];
+ let mut seen_keys = Vec::new();
+
+ loop {
+ let key = match it.next() {
+ Some(TokenTree::Ident(ident)) => ident.to_string(),
+ Some(_) => panic!("Expected Ident or end"),
+ None => break,
+ };
+
+ if seen_keys.contains(&key) {
+ panic!(
+ "Duplicated key \"{}\". Keys can only be specified once.",
+ key
+ );
+ }
+
+ assert_eq!(expect_punct(it), ':');
+
+ match key.as_str() {
+ "type" => info.type_ = expect_ident(it),
+ "name" => info.name = expect_byte_string(it),
+ "author" => info.author = Some(expect_byte_string(it)),
+ "description" => info.description = Some(expect_byte_string(it)),
+ "license" => info.license = expect_byte_string(it),
+ "alias" => info.alias = Some(expect_byte_string(it)),
+ "alias_rtnl_link" => {
+ info.alias = Some(format!("rtnl-link-{}", expect_byte_string(it)))
+ }
+ "params" => info.params = Some(expect_group(it)),
+ _ => panic!(
+ "Unknown key \"{}\". Valid keys are: {:?}.",
+ key, EXPECTED_KEYS
+ ),
+ }
+
+ assert_eq!(expect_punct(it), ',');
+
+ seen_keys.push(key);
+ }
+
+ expect_end(it);
+
+ for key in REQUIRED_KEYS {
+ if !seen_keys.iter().any(|e| e == key) {
+ panic!("Missing required key \"{}\".", key);
+ }
+ }
+
+ let mut ordered_keys: Vec<&str> = Vec::new();
+ for key in EXPECTED_KEYS {
+ if seen_keys.iter().any(|e| e == key) {
+ ordered_keys.push(key);
+ }
+ }
+
+ if seen_keys != ordered_keys {
+ panic!(
+ "Keys are not ordered as expected. Order them like: {:?}.",
+ ordered_keys
+ );
+ }
+
+ info
+ }
+}
+
+pub(crate) fn module(ts: TokenStream) -> TokenStream {
+ let mut it = ts.into_iter();
+
+ let info = ModuleInfo::parse(&mut it);
+
+ let mut modinfo = ModInfoBuilder::new(info.name.as_ref());
+ if let Some(author) = info.author {
+ modinfo.emit("author", &author);
+ }
+ if let Some(description) = info.description {
+ modinfo.emit("description", &description);
+ }
+ modinfo.emit("license", &info.license);
+ if let Some(alias) = info.alias {
+ modinfo.emit("alias", &alias);
+ }
+
+ // Built-in modules also export the `file` modinfo string.
+ let file =
+ std::env::var("RUST_MODFILE").expect("Unable to fetch RUST_MODFILE environmental variable");
+ modinfo.emit_only_builtin("file", &file);
+
+ let mut array_types_to_generate = Vec::new();
+ if let Some(params) = info.params {
+ assert_eq!(params.delimiter(), Delimiter::Brace);
+
+ let mut it = params.stream().into_iter();
+
+ loop {
+ let param_name = match it.next() {
+ Some(TokenTree::Ident(ident)) => ident.to_string(),
+ Some(_) => panic!("Expected Ident or end"),
+ None => break,
+ };
+
+ assert_eq!(expect_punct(&mut it), ':');
+ let param_type = expect_type(&mut it);
+ let group = expect_group(&mut it);
+ assert_eq!(expect_punct(&mut it), ',');
+
+ assert_eq!(group.delimiter(), Delimiter::Brace);
+
+ let mut param_it = group.stream().into_iter();
+ let param_default = get_default(&param_type, &mut param_it);
+ let param_permissions = get_literal(&mut param_it, "permissions");
+ let param_description = get_byte_string(&mut param_it, "description");
+ expect_end(&mut param_it);
+
+ // TODO: More primitive types.
+ // TODO: Other kinds: unsafes, etc.
+ let (param_kernel_type, ops): (String, _) = match param_type {
+ ParamType::Ident(ref param_type) => (
+ param_type.to_string(),
+ param_ops_path(param_type).to_string(),
+ ),
+ ParamType::Array {
+ ref vals,
+ max_length,
+ } => {
+ array_types_to_generate.push((vals.clone(), max_length));
+ (
+ format!("__rust_array_param_{}_{}", vals, max_length),
+ generated_array_ops_name(vals, max_length),
+ )
+ }
+ };
+
+ modinfo.emit_param("parmtype", &param_name, &param_kernel_type);
+ modinfo.emit_param("parm", &param_name, &param_description);
+ let param_type_internal = match param_type {
+ ParamType::Ident(ref param_type) => match param_type.as_ref() {
+ "str" => "kernel::module_param::StringParam".to_string(),
+ other => other.to_string(),
+ },
+ ParamType::Array {
+ ref vals,
+ max_length,
+ } => format!(
+ "kernel::module_param::ArrayParam<{vals}, {max_length}>",
+ vals = vals,
+ max_length = max_length
+ ),
+ };
+ let read_func = if permissions_are_readonly(&param_permissions) {
+ format!(
+ "
+ fn read(&self)
+ -> &<{param_type_internal} as kernel::module_param::ModuleParam>::Value {{
+ // SAFETY: Parameters do not need to be locked because they are
+ // read only or sysfs is not enabled.
+ unsafe {{
+ <{param_type_internal} as kernel::module_param::ModuleParam>::value(
+ &__{name}_{param_name}_value
+ )
+ }}
+ }}
+ ",
+ name = info.name,
+ param_name = param_name,
+ param_type_internal = param_type_internal,
+ )
+ } else {
+ format!(
+ "
+ fn read<'lck>(&self, lock: &'lck kernel::KParamGuard)
+ -> &'lck <{param_type_internal} as kernel::module_param::ModuleParam>::Value {{
+ // SAFETY: Parameters are locked by `KParamGuard`.
+ unsafe {{
+ <{param_type_internal} as kernel::module_param::ModuleParam>::value(
+ &__{name}_{param_name}_value
+ )
+ }}
+ }}
+ ",
+ name = info.name,
+ param_name = param_name,
+ param_type_internal = param_type_internal,
+ )
+ };
+ let kparam = format!(
+ "
+ kernel::bindings::kernel_param__bindgen_ty_1 {{
+ arg: unsafe {{ &__{name}_{param_name}_value }}
+ as *const _ as *mut core::ffi::c_void,
+ }},
+ ",
+ name = info.name,
+ param_name = param_name,
+ );
+ write!(
+ modinfo.buffer,
+ "
+ static mut __{name}_{param_name}_value: {param_type_internal} = {param_default};
+
+ struct __{name}_{param_name};
+
+ impl __{name}_{param_name} {{ {read_func} }}
+
+ const {param_name}: __{name}_{param_name} = __{name}_{param_name};
+
+ // Note: the C macro that generates the static structs for the `__param` section
+ // asks for them to be `aligned(sizeof(void *))`. However, that was put in place
+ // in 2003 in commit 38d5b085d2a0 (\"[PATCH] Fix over-alignment problem on x86-64\")
+ // to undo GCC over-alignment of static structs of >32 bytes. It seems that is
+ // not the case anymore, so we simplify to a transparent representation here
+ // in the expectation that it is not needed anymore.
+ // TODO: Revisit this to confirm the above comment and remove it if it happened.
+ #[repr(transparent)]
+ struct __{name}_{param_name}_RacyKernelParam(kernel::bindings::kernel_param);
+
+ unsafe impl Sync for __{name}_{param_name}_RacyKernelParam {{
+ }}
+
+ #[cfg(not(MODULE))]
+ const __{name}_{param_name}_name: *const core::ffi::c_char =
+ b\"{name}.{param_name}\\0\" as *const _ as *const core::ffi::c_char;
+
+ #[cfg(MODULE)]
+ const __{name}_{param_name}_name: *const core::ffi::c_char =
+ b\"{param_name}\\0\" as *const _ as *const core::ffi::c_char;
+
+ #[link_section = \"__param\"]
+ #[used]
+ static __{name}_{param_name}_struct: __{name}_{param_name}_RacyKernelParam =
+ __{name}_{param_name}_RacyKernelParam(kernel::bindings::kernel_param {{
+ name: __{name}_{param_name}_name,
+ // SAFETY: `__this_module` is constructed by the kernel at load time
+ // and will not be freed until the module is unloaded.
+ #[cfg(MODULE)]
+ mod_: unsafe {{ &kernel::bindings::__this_module as *const _ as *mut _ }},
+ #[cfg(not(MODULE))]
+ mod_: core::ptr::null_mut(),
+ ops: unsafe {{ &{ops} }} as *const kernel::bindings::kernel_param_ops,
+ perm: {permissions},
+ level: -1,
+ flags: 0,
+ __bindgen_anon_1: {kparam}
+ }});
+ ",
+ name = info.name,
+ param_type_internal = param_type_internal,
+ read_func = read_func,
+ param_default = param_default,
+ param_name = param_name,
+ ops = ops,
+ permissions = param_permissions,
+ kparam = kparam,
+ )
+ .unwrap();
+ }
+ }
+
+ let mut generated_array_types = String::new();
+
+ for (vals, max_length) in array_types_to_generate {
+ let ops_name = generated_array_ops_name(&vals, max_length);
+ write!(
+ generated_array_types,
+ "
+ kernel::make_param_ops!(
+ {ops_name},
+ kernel::module_param::ArrayParam<{vals}, {{ {max_length} }}>
+ );
+ ",
+ ops_name = ops_name,
+ vals = vals,
+ max_length = max_length,
+ )
+ .unwrap();
+ }
+
+ format!(
+ "
+ /// The module name.
+ ///
+ /// Used by the printing macros, e.g. [`info!`].
+ const __LOG_PREFIX: &[u8] = b\"{name}\\0\";
+
+ /// The \"Rust loadable module\" mark, for `scripts/is_rust_module.sh`.
+ //
+ // This may be best done another way later on, e.g. as a new modinfo
+ // key or a new section. For the moment, keep it simple.
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[used]
+ static __IS_RUST_MODULE: () = ();
+
+ static mut __MOD: Option<{type_}> = None;
+
+ // SAFETY: `__this_module` is constructed by the kernel at load time and will not be
+ // freed until the module is unloaded.
+ #[cfg(MODULE)]
+ static THIS_MODULE: kernel::ThisModule = unsafe {{
+ kernel::ThisModule::from_ptr(&kernel::bindings::__this_module as *const _ as *mut _)
+ }};
+ #[cfg(not(MODULE))]
+ static THIS_MODULE: kernel::ThisModule = unsafe {{
+ kernel::ThisModule::from_ptr(core::ptr::null_mut())
+ }};
+
+ // Loadable modules need to export the `{{init,cleanup}}_module` identifiers.
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn init_module() -> core::ffi::c_int {{
+ __init()
+ }}
+
+ #[cfg(MODULE)]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn cleanup_module() {{
+ __exit()
+ }}
+
+ // Built-in modules are initialized through an initcall pointer
+ // and the identifiers need to be unique.
+ #[cfg(not(MODULE))]
+ #[cfg(not(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS))]
+ #[doc(hidden)]
+ #[link_section = \"{initcall_section}\"]
+ #[used]
+ pub static __{name}_initcall: extern \"C\" fn() -> core::ffi::c_int = __{name}_init;
+
+ #[cfg(not(MODULE))]
+ #[cfg(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)]
+ core::arch::global_asm!(
+ r#\".section \"{initcall_section}\", \"a\"
+ __{name}_initcall:
+ .long __{name}_init - .
+ .previous
+ \"#
+ );
+
+ #[cfg(not(MODULE))]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn __{name}_init() -> core::ffi::c_int {{
+ __init()
+ }}
+
+ #[cfg(not(MODULE))]
+ #[doc(hidden)]
+ #[no_mangle]
+ pub extern \"C\" fn __{name}_exit() {{
+ __exit()
+ }}
+
+ fn __init() -> core::ffi::c_int {{
+ match <{type_} as kernel::Module>::init(kernel::c_str!(\"{name}\"), &THIS_MODULE) {{
+ Ok(m) => {{
+ unsafe {{
+ __MOD = Some(m);
+ }}
+ return 0;
+ }}
+ Err(e) => {{
+ return e.to_kernel_errno();
+ }}
+ }}
+ }}
+
+ fn __exit() {{
+ unsafe {{
+ // Invokes `drop()` on `__MOD`, which should be used for cleanup.
+ __MOD = None;
+ }}
+ }}
+
+ {modinfo}
+
+ {generated_array_types}
+ ",
+ type_ = info.type_,
+ name = info.name,
+ modinfo = modinfo.buffer,
+ generated_array_types = generated_array_types,
+ initcall_section = ".initcall6.init"
+ )
+ .parse()
+ .expect("Error parsing formatted string into token stream.")
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn test_permissions_are_readonly() {
+ assert!(permissions_are_readonly("0b000000000"));
+ assert!(permissions_are_readonly("0o000"));
+ assert!(permissions_are_readonly("000"));
+ assert!(permissions_are_readonly("0x000"));
+
+ assert!(!permissions_are_readonly("0b111111111"));
+ assert!(!permissions_are_readonly("0o777"));
+ assert!(!permissions_are_readonly("511"));
+ assert!(!permissions_are_readonly("0x1ff"));
+
+ assert!(permissions_are_readonly("0o014"));
+ assert!(permissions_are_readonly("0o015"));
+
+ assert!(!permissions_are_readonly("0o214"));
+ assert!(!permissions_are_readonly("0o024"));
+ assert!(!permissions_are_readonly("0o012"));
+
+ assert!(!permissions_are_readonly("0o315"));
+ assert!(!permissions_are_readonly("0o065"));
+ assert!(!permissions_are_readonly("0o017"));
+ }
+}
diff --git a/rust/macros/vtable.rs b/rust/macros/vtable.rs
new file mode 100644
index 000000000000..34d5e7fb5768
--- /dev/null
+++ b/rust/macros/vtable.rs
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+use proc_macro::{Delimiter, Group, TokenStream, TokenTree};
+use std::collections::HashSet;
+use std::fmt::Write;
+
+pub(crate) fn vtable(_attr: TokenStream, ts: TokenStream) -> TokenStream {
+ let mut tokens: Vec<_> = ts.into_iter().collect();
+
+ // Scan for the `trait` or `impl` keyword.
+ let is_trait = tokens
+ .iter()
+ .find_map(|token| match token {
+ TokenTree::Ident(ident) => match ident.to_string().as_str() {
+ "trait" => Some(true),
+ "impl" => Some(false),
+ _ => None,
+ },
+ _ => None,
+ })
+ .expect("#[vtable] attribute should only be applied to trait or impl block");
+
+ // Retrieve the main body. The main body should be the last token tree.
+ let body = match tokens.pop() {
+ Some(TokenTree::Group(group)) if group.delimiter() == Delimiter::Brace => group,
+ _ => panic!("cannot locate main body of trait or impl block"),
+ };
+
+ let mut body_it = body.stream().into_iter();
+ let mut functions = Vec::new();
+ let mut consts = HashSet::new();
+ while let Some(token) = body_it.next() {
+ match token {
+ TokenTree::Ident(ident) if ident.to_string() == "fn" => {
+ let fn_name = match body_it.next() {
+ Some(TokenTree::Ident(ident)) => ident.to_string(),
+ // Possibly we've encountered a fn pointer type instead.
+ _ => continue,
+ };
+ functions.push(fn_name);
+ }
+ TokenTree::Ident(ident) if ident.to_string() == "const" => {
+ let const_name = match body_it.next() {
+ Some(TokenTree::Ident(ident)) => ident.to_string(),
+ // Possibly we've encountered an inline const block instead.
+ _ => continue,
+ };
+ consts.insert(const_name);
+ }
+ _ => (),
+ }
+ }
+
+ let mut const_items;
+ if is_trait {
+ const_items = "
+ /// A marker to prevent implementors from forgetting to use the [`#[vtable]`](vtable)
+ /// attribute when implementing this trait.
+ const USE_VTABLE_ATTR: ();
+ "
+ .to_owned();
+
+ for f in functions {
+ let gen_const_name = format!("HAS_{}", f.to_uppercase());
+ // Skip if it's declared already -- this allows user override.
+ if consts.contains(&gen_const_name) {
+ continue;
+ }
+ // We don't know at the implementation site whether a method is required
+ // or provided, so we have to generate a const for all methods.
+ write!(
+ const_items,
+ "/// Indicates if the `{f}` method is overridden by the implementor.
+ const {gen_const_name}: bool = false;",
+ )
+ .unwrap();
+ }
+ } else {
+ const_items = "const USE_VTABLE_ATTR: () = ();".to_owned();
+
+ for f in functions {
+ let gen_const_name = format!("HAS_{}", f.to_uppercase());
+ if consts.contains(&gen_const_name) {
+ continue;
+ }
+ write!(const_items, "const {gen_const_name}: bool = true;").unwrap();
+ }
+ }
+
+ let new_body = vec![const_items.parse().unwrap(), body.stream()]
+ .into_iter()
+ .collect();
+ tokens.push(TokenTree::Group(Group::new(Delimiter::Brace, new_body)));
+ tokens.into_iter().collect()
+}
diff --git a/samples/Kconfig b/samples/Kconfig
index 470ee3baf2e1..0d81c00289ee 100644
--- a/samples/Kconfig
+++ b/samples/Kconfig
@@ -263,6 +263,8 @@ config SAMPLE_CORESIGHT_SYSCFG
This demonstrates how a user may create their own CoreSight
configurations and easily load them into the system at runtime.
+source "samples/rust/Kconfig"
+
endif # SAMPLES
config HAVE_SAMPLE_FTRACE_DIRECT
diff --git a/samples/Makefile b/samples/Makefile
index 701e912ab5af..9832ef3f8fcb 100644
--- a/samples/Makefile
+++ b/samples/Makefile
@@ -35,3 +35,4 @@ subdir-$(CONFIG_SAMPLE_WATCH_QUEUE) += watch_queue
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak/
obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight/
obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/
+obj-$(CONFIG_SAMPLES_RUST) += rust/
diff --git a/samples/rust/Kconfig b/samples/rust/Kconfig
new file mode 100644
index 000000000000..861b35318b4d
--- /dev/null
+++ b/samples/rust/Kconfig
@@ -0,0 +1,165 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig SAMPLES_RUST
+ bool "Rust samples"
+ depends on RUST
+ help
+ You can build sample Rust kernel code here.
+
+ If unsure, say N.
+
+if SAMPLES_RUST
+
+config SAMPLE_RUST_MINIMAL
+ tristate "Minimal"
+ help
+ This option builds the Rust minimal module sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_minimal.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_PRINT
+ tristate "Printing macros"
+ help
+ This option builds the Rust printing macros sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_print.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_MODULE_PARAMETERS
+ tristate "Module parameters"
+ help
+ This option builds the Rust module parameters sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_module_parameters.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_SYNC
+ tristate "Synchronisation primitives"
+ help
+ This option builds the Rust synchronisation primitives sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_sync.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_CHRDEV
+ tristate "Character device"
+ help
+ This option builds the Rust character device sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_chrdev.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_MISCDEV
+ tristate "Miscellaneous device"
+ help
+ This option builds the Rust miscellaneous device sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_miscdev.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_STACK_PROBING
+ tristate "Stack probing"
+ help
+ This option builds the Rust stack probing sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_stack_probing.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_SEMAPHORE
+ tristate "Semaphore"
+ help
+ This option builds the Rust semaphore sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_semaphore.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_SEMAPHORE_C
+ tristate "Semaphore (in C, for comparison)"
+ help
+ This option builds the Rust semaphore sample (in C, for comparison).
+
+ To compile this as a module, choose M here:
+ the module will be called rust_semaphore_c.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_RANDOM
+ tristate "Random"
+ help
+ This option builds the Rust random sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_random.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_PLATFORM
+ tristate "Platform device driver"
+ help
+ This option builds the Rust platform device driver sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_platform.
+
+config SAMPLE_RUST_FS
+ tristate "File system"
+ help
+ This option builds the Rust file system sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_fs.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_NETFILTER
+ tristate "Network filter module"
+ help
+ This option builds the Rust netfilter module sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_netfilter.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_ECHO_SERVER
+ tristate "Echo server module"
+ help
+ This option builds the Rust echo server module sample.
+
+ To compile this as a module, choose M here:
+ the module will be called rust_echo_server.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_HOSTPROGS
+ bool "Host programs"
+ help
+ This option builds the Rust host program samples.
+
+ If unsure, say N.
+
+config SAMPLE_RUST_SELFTESTS
+ tristate "Self tests"
+ help
+ This option builds the self test cases for Rust.
+
+ If unsure, say N.
+
+endif # SAMPLES_RUST
diff --git a/samples/rust/Makefile b/samples/rust/Makefile
new file mode 100644
index 000000000000..420bcefeb082
--- /dev/null
+++ b/samples/rust/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SAMPLE_RUST_MINIMAL) += rust_minimal.o
+obj-$(CONFIG_SAMPLE_RUST_PRINT) += rust_print.o
+obj-$(CONFIG_SAMPLE_RUST_MODULE_PARAMETERS) += rust_module_parameters.o
+obj-$(CONFIG_SAMPLE_RUST_SYNC) += rust_sync.o
+obj-$(CONFIG_SAMPLE_RUST_CHRDEV) += rust_chrdev.o
+obj-$(CONFIG_SAMPLE_RUST_MISCDEV) += rust_miscdev.o
+obj-$(CONFIG_SAMPLE_RUST_STACK_PROBING) += rust_stack_probing.o
+obj-$(CONFIG_SAMPLE_RUST_SEMAPHORE) += rust_semaphore.o
+obj-$(CONFIG_SAMPLE_RUST_SEMAPHORE_C) += rust_semaphore_c.o
+obj-$(CONFIG_SAMPLE_RUST_RANDOM) += rust_random.o
+obj-$(CONFIG_SAMPLE_RUST_PLATFORM) += rust_platform.o
+obj-$(CONFIG_SAMPLE_RUST_NETFILTER) += rust_netfilter.o
+obj-$(CONFIG_SAMPLE_RUST_ECHO_SERVER) += rust_echo_server.o
+obj-$(CONFIG_SAMPLE_RUST_FS) += rust_fs.o
+obj-$(CONFIG_SAMPLE_RUST_SELFTESTS) += rust_selftests.o
+
+subdir-$(CONFIG_SAMPLE_RUST_HOSTPROGS) += hostprogs
diff --git a/samples/rust/hostprogs/.gitignore b/samples/rust/hostprogs/.gitignore
new file mode 100644
index 000000000000..a6c173da5048
--- /dev/null
+++ b/samples/rust/hostprogs/.gitignore
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+single
diff --git a/samples/rust/hostprogs/Makefile b/samples/rust/hostprogs/Makefile
new file mode 100644
index 000000000000..8ddcbd7416db
--- /dev/null
+++ b/samples/rust/hostprogs/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+hostprogs-always-y := single
+
+single-rust := y
diff --git a/samples/rust/hostprogs/a.rs b/samples/rust/hostprogs/a.rs
new file mode 100644
index 000000000000..f7a4a3d0f4e0
--- /dev/null
+++ b/samples/rust/hostprogs/a.rs
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust single host program sample: module `a`.
+
+pub(crate) fn f(x: i32) {
+ println!("The number is {}.", x);
+}
diff --git a/samples/rust/hostprogs/b.rs b/samples/rust/hostprogs/b.rs
new file mode 100644
index 000000000000..c1675890648f
--- /dev/null
+++ b/samples/rust/hostprogs/b.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust single host program sample: module `b`.
+
+pub(crate) const CONSTANT: i32 = 42;
diff --git a/samples/rust/hostprogs/single.rs b/samples/rust/hostprogs/single.rs
new file mode 100644
index 000000000000..8c48a119339a
--- /dev/null
+++ b/samples/rust/hostprogs/single.rs
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust single host program sample.
+
+mod a;
+mod b;
+
+fn main() {
+ println!("Hello world!");
+
+ a::f(b::CONSTANT);
+}
diff --git a/samples/rust/rust_chrdev.rs b/samples/rust/rust_chrdev.rs
new file mode 100644
index 000000000000..52f6e652d1a6
--- /dev/null
+++ b/samples/rust/rust_chrdev.rs
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust character device sample.
+
+use kernel::prelude::*;
+use kernel::{chrdev, file};
+
+module! {
+ type: RustChrdev,
+ name: b"rust_chrdev",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust character device sample",
+ license: b"GPL",
+}
+
+struct RustFile;
+
+#[vtable]
+impl file::Operations for RustFile {
+ fn open(_shared: &(), _file: &file::File) -> Result {
+ Ok(())
+ }
+}
+
+struct RustChrdev {
+ _dev: Pin<Box<chrdev::Registration<2>>>,
+}
+
+impl kernel::Module for RustChrdev {
+ fn init(name: &'static CStr, module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust character device sample (init)\n");
+
+ let mut chrdev_reg = chrdev::Registration::new_pinned(name, 0, module)?;
+
+ // Register the same kind of device twice; we're just demonstrating
+ // that you can use multiple minors. There are two minors in this case
+ // because its type is `chrdev::Registration<2>`.
+ chrdev_reg.as_mut().register::<RustFile>()?;
+ chrdev_reg.as_mut().register::<RustFile>()?;
+
+ Ok(RustChrdev { _dev: chrdev_reg })
+ }
+}
+
+impl Drop for RustChrdev {
+ fn drop(&mut self) {
+ pr_info!("Rust character device sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_echo_server.rs b/samples/rust/rust_echo_server.rs
new file mode 100644
index 000000000000..5fc802f4dc33
--- /dev/null
+++ b/samples/rust/rust_echo_server.rs
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust echo server sample.
+
+use kernel::{
+ kasync::executor::{workqueue::Executor as WqExecutor, AutoStopHandle, Executor},
+ kasync::net::{TcpListener, TcpStream},
+ net::{self, Ipv4Addr, SocketAddr, SocketAddrV4},
+ prelude::*,
+ spawn_task,
+ sync::{Ref, RefBorrow},
+};
+
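+// Echoes everything received on the stream back to the peer until the
+// connection is closed (a zero-length read).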
+async fn echo_server(stream: TcpStream) -> Result {
+ let mut buf = [0u8; 1024];
+ loop {
+ let n = stream.read(&mut buf).await?;
+ if n == 0 {
+ return Ok(());
+ }
+ stream.write_all(&buf[..n]).await?;
+ }
+}
+
+async fn accept_loop(listener: TcpListener, executor: Ref<impl Executor>) {
+ loop {
+ if let Ok(stream) = listener.accept().await {
+ let _ = spawn_task!(executor.as_ref_borrow(), echo_server(stream));
+ }
+ }
+}
+
+fn start_listener(ex: RefBorrow<'_, impl Executor + Send + Sync + 'static>) -> Result {
+ let addr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::ANY, 8080));
+ let listener = TcpListener::try_new(net::init_ns(), &addr)?;
+ spawn_task!(ex, accept_loop(listener, ex.into()))?;
+ Ok(())
+}
+
+struct RustEchoServer {
+ _handle: AutoStopHandle<dyn Executor>,
+}
+
+impl kernel::Module for RustEchoServer {
+ fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ let handle = WqExecutor::try_new(kernel::workqueue::system())?;
+ start_listener(handle.executor())?;
+ Ok(Self {
+ _handle: handle.into(),
+ })
+ }
+}
+
+module! {
+ type: RustEchoServer,
+ name: b"rust_echo_server",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust tcp echo sample",
+ license: b"GPL v2",
+}
diff --git a/samples/rust/rust_fs.rs b/samples/rust/rust_fs.rs
new file mode 100644
index 000000000000..d286b396dd17
--- /dev/null
+++ b/samples/rust/rust_fs.rs
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust file system sample.
+
+use kernel::prelude::*;
+use kernel::{c_str, fs};
+
+module_fs! {
+ type: RustFs,
+ name: b"rust_fs",
+ author: b"Rust for Linux Contributors",
+ license: b"GPL",
+}
+
+struct RustFs;
+
+#[vtable]
+impl fs::Context<Self> for RustFs {
+ type Data = ();
+
+ kernel::define_fs_params! {(),
+ {flag, "flag", |_, v| { pr_info!("flag passed-in: {v}\n"); Ok(()) } },
+ {flag_no, "flagno", |_, v| { pr_info!("flagno passed-in: {v}\n"); Ok(()) } },
+ {bool, "bool", |_, v| { pr_info!("bool passed-in: {v}\n"); Ok(()) } },
+ {u32, "u32", |_, v| { pr_info!("u32 passed-in: {v}\n"); Ok(()) } },
+ {u32oct, "u32oct", |_, v| { pr_info!("u32oct passed-in: {v}\n"); Ok(()) } },
+ {u32hex, "u32hex", |_, v| { pr_info!("u32hex passed-in: {v}\n"); Ok(()) } },
+ {s32, "s32", |_, v| { pr_info!("s32 passed-in: {v}\n"); Ok(()) } },
+ {u64, "u64", |_, v| { pr_info!("u64 passed-in: {v}\n"); Ok(()) } },
+ {string, "string", |_, v| { pr_info!("string passed-in: {v}\n"); Ok(()) } },
+ {enum, "enum", [("first", 10), ("second", 20)], |_, v| {
+ pr_info!("enum passed-in: {v}\n"); Ok(()) }
+ },
+ }
+
+ fn try_new() -> Result {
+ pr_info!("context created!\n");
+ Ok(())
+ }
+}
+
+impl fs::Type for RustFs {
+ type Context = Self;
+ const SUPER_TYPE: fs::Super = fs::Super::Independent;
+ const NAME: &'static CStr = c_str!("rustfs");
+ const FLAGS: i32 = fs::flags::USERNS_MOUNT;
+
+ fn fill_super(_data: (), sb: fs::NewSuperBlock<'_, Self>) -> Result<&fs::SuperBlock<Self>> {
+ let sb = sb.init(
+ (),
+ &fs::SuperParams {
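+ // 0x72757374 is the ASCII encoding of "rust".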
+ magic: 0x72757374,
+ ..fs::SuperParams::DEFAULT
+ },
+ )?;
+ let sb = sb.init_root()?;
+ Ok(sb)
+ }
+}
diff --git a/samples/rust/rust_minimal.rs b/samples/rust/rust_minimal.rs
new file mode 100644
index 000000000000..6e1a926c6f62
--- /dev/null
+++ b/samples/rust/rust_minimal.rs
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust minimal sample.
+
+use kernel::prelude::*;
+
+module! {
+ type: RustMinimal,
+ name: b"rust_minimal",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust minimal sample",
+ license: b"GPL",
+}
+
+struct RustMinimal {
+ message: String,
+}
+
+impl kernel::Module for RustMinimal {
+ fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust minimal sample (init)\n");
+ pr_info!("Am I built-in? {}\n", !cfg!(MODULE));
+
+ Ok(RustMinimal {
+ message: "on the heap!".try_to_owned()?,
+ })
+ }
+}
+
+impl Drop for RustMinimal {
+ fn drop(&mut self) {
+ pr_info!("My message is {}\n", self.message);
+ pr_info!("Rust minimal sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_miscdev.rs b/samples/rust/rust_miscdev.rs
new file mode 100644
index 000000000000..647b77864f10
--- /dev/null
+++ b/samples/rust/rust_miscdev.rs
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust miscellaneous device sample.
+
+use kernel::prelude::*;
+use kernel::{
+ file::{self, File},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ miscdev,
+ sync::{CondVar, Mutex, Ref, RefBorrow, UniqueRef},
+};
+
+module! {
+ type: RustMiscdev,
+ name: b"rust_miscdev",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust miscellaneous device sample",
+ license: b"GPL",
+}
+
+const MAX_TOKENS: usize = 3;
+
+struct SharedStateInner {
+ token_count: usize,
+}
+
+struct SharedState {
+ state_changed: CondVar,
+ inner: Mutex<SharedStateInner>,
+}
+
+impl SharedState {
+ fn try_new() -> Result<Ref<Self>> {
+ let mut state = Pin::from(UniqueRef::try_new(Self {
+ // SAFETY: `condvar_init!` is called below.
+ state_changed: unsafe { CondVar::new() },
+ // SAFETY: `mutex_init!` is called below.
+ inner: unsafe { Mutex::new(SharedStateInner { token_count: 0 }) },
+ })?);
+
+ // SAFETY: `state_changed` is pinned when `state` is.
+ let pinned = unsafe { state.as_mut().map_unchecked_mut(|s| &mut s.state_changed) };
+ kernel::condvar_init!(pinned, "SharedState::state_changed");
+
+ // SAFETY: `inner` is pinned when `state` is.
+ let pinned = unsafe { state.as_mut().map_unchecked_mut(|s| &mut s.inner) };
+ kernel::mutex_init!(pinned, "SharedState::inner");
+
+ Ok(state.into())
+ }
+}
+
+struct Token;
+#[vtable]
+impl file::Operations for Token {
+ type Data = Ref<SharedState>;
+ type OpenData = Ref<SharedState>;
+
+ fn open(shared: &Ref<SharedState>, _file: &File) -> Result<Self::Data> {
+ Ok(shared.clone())
+ }
+
+ fn read(
+ shared: RefBorrow<'_, SharedState>,
+ _: &File,
+ data: &mut impl IoBufferWriter,
+ offset: u64,
+ ) -> Result<usize> {
+ // Succeed if the caller doesn't provide a buffer or if the read is not at the start.
+ if data.is_empty() || offset != 0 {
+ return Ok(0);
+ }
+
+ {
+ let mut inner = shared.inner.lock();
+
+ // Wait until we are allowed to decrement the token count or a signal arrives.
+ while inner.token_count == 0 {
+ if shared.state_changed.wait(&mut inner) {
+ return Err(EINTR);
+ }
+ }
+
+ // Consume a token.
+ inner.token_count -= 1;
+ }
+
+ // Notify a possible writer waiting.
+ shared.state_changed.notify_all();
+
+ // Write a one-byte 1 to the reader.
+ data.write_slice(&[1u8; 1])?;
+ Ok(1)
+ }
+
+ fn write(
+ shared: RefBorrow<'_, SharedState>,
+ _: &File,
+ data: &mut impl IoBufferReader,
+ _offset: u64,
+ ) -> Result<usize> {
+ {
+ let mut inner = shared.inner.lock();
+
+ // Wait until we are allowed to increment the token count or a signal arrives.
+ while inner.token_count == MAX_TOKENS {
+ if shared.state_changed.wait(&mut inner) {
+ return Err(EINTR);
+ }
+ }
+
+ // Increment the number of tokens so that a reader can be released.
+ inner.token_count += 1;
+ }
+
+ // Notify a possible reader waiting.
+ shared.state_changed.notify_all();
+ Ok(data.len())
+ }
+}
+
+struct RustMiscdev {
+ _dev: Pin<Box<miscdev::Registration<Token>>>,
+}
+
+impl kernel::Module for RustMiscdev {
+ fn init(name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust miscellaneous device sample (init)\n");
+
+ let state = SharedState::try_new()?;
+
+ Ok(RustMiscdev {
+ _dev: miscdev::Registration::new_pinned(fmt!("{name}"), state)?,
+ })
+ }
+}
+
+impl Drop for RustMiscdev {
+ fn drop(&mut self) {
+ pr_info!("Rust miscellaneous device sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_module_parameters.rs b/samples/rust/rust_module_parameters.rs
new file mode 100644
index 000000000000..12fe5e738e83
--- /dev/null
+++ b/samples/rust/rust_module_parameters.rs
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust module parameters sample.
+
+use kernel::prelude::*;
+
+module! {
+ type: RustModuleParameters,
+ name: b"rust_module_parameters",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust module parameters sample",
+ license: b"GPL",
+ params: {
+ my_bool: bool {
+ default: true,
+ permissions: 0,
+ description: b"Example of bool",
+ },
+ my_i32: i32 {
+ default: 42,
+ permissions: 0o644,
+ description: b"Example of i32",
+ },
+ my_str: str {
+ default: b"default str val",
+ permissions: 0o644,
+ description: b"Example of a string param",
+ },
+ my_usize: usize {
+ default: 42,
+ permissions: 0o644,
+ description: b"Example of usize",
+ },
+ my_array: ArrayParam<i32, 3> {
+ default: [0, 1],
+ permissions: 0,
+ description: b"Example of array",
+ },
+ },
+}
+
+struct RustModuleParameters;
+
+impl kernel::Module for RustModuleParameters {
+ fn init(_name: &'static CStr, module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust module parameters sample (init)\n");
+
+ {
+ let lock = module.kernel_param_lock();
+ pr_info!("Parameters:\n");
+ pr_info!(" my_bool: {}\n", my_bool.read());
+ pr_info!(" my_i32: {}\n", my_i32.read(&lock));
+ pr_info!(
+ " my_str: {}\n",
+ core::str::from_utf8(my_str.read(&lock))?
+ );
+ pr_info!(" my_usize: {}\n", my_usize.read(&lock));
+ pr_info!(" my_array: {:?}\n", my_array.read());
+ }
+
+ Ok(RustModuleParameters)
+ }
+}
+
+impl Drop for RustModuleParameters {
+ fn drop(&mut self) {
+ pr_info!("Rust module parameters sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_netfilter.rs b/samples/rust/rust_netfilter.rs
new file mode 100644
index 000000000000..4bd5c07fee8c
--- /dev/null
+++ b/samples/rust/rust_netfilter.rs
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust netfilter sample.
+
+use kernel::net;
+use kernel::net::filter::{self as netfilter, inet, Disposition, Family};
+use kernel::prelude::*;
+
+module! {
+ type: RustNetfilter,
+ name: b"rust_netfilter",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust netfilter sample",
+ license: b"GPL",
+}
+
+struct RustNetfilter {
+ _in: Pin<Box<netfilter::Registration<Self>>>,
+ _out: Pin<Box<netfilter::Registration<Self>>>,
+}
+
+impl netfilter::Filter for RustNetfilter {
+ fn filter(_: (), skb: &net::SkBuff) -> Disposition {
+ let data = skb.head_data();
+ pr_info!(
+ "packet headlen={}, len={}, first bytes={:02x?}\n",
+ data.len(),
+ skb.len(),
+ &data[..core::cmp::min(10, data.len())]
+ );
+ Disposition::Accept
+ }
+}
+
+impl kernel::Module for RustNetfilter {
+ fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ Ok(Self {
+ _in: netfilter::Registration::new_pinned(
+ Family::INet(inet::Hook::PreRouting),
+ 0,
+ net::init_ns().into(),
+ None,
+ (),
+ )?,
+ _out: netfilter::Registration::new_pinned(
+ Family::INet(inet::Hook::PostRouting),
+ 0,
+ net::init_ns().into(),
+ None,
+ (),
+ )?,
+ })
+ }
+}
diff --git a/samples/rust/rust_platform.rs b/samples/rust/rust_platform.rs
new file mode 100644
index 000000000000..f62784676919
--- /dev/null
+++ b/samples/rust/rust_platform.rs
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust platform device driver sample.
+
+use kernel::{module_platform_driver, of, platform, prelude::*};
+
+module_platform_driver! {
+ type: Driver,
+ name: b"rust_platform",
+ license: b"GPL",
+}
+
+struct Driver;
+impl platform::Driver for Driver {
+ kernel::define_of_id_table! {(), [
+ (of::DeviceId::Compatible(b"rust,sample"), None),
+ ]}
+
+ fn probe(_dev: &mut platform::Device, _id_info: Option<&Self::IdInfo>) -> Result {
+ Ok(())
+ }
+}
diff --git a/samples/rust/rust_print.rs b/samples/rust/rust_print.rs
new file mode 100644
index 000000000000..30d96e025d89
--- /dev/null
+++ b/samples/rust/rust_print.rs
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust printing macros sample.
+
+use kernel::prelude::*;
+use kernel::{pr_cont, str::CStr, ThisModule};
+
+module! {
+ type: RustPrint,
+ name: b"rust_print",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust printing macros sample",
+ license: b"GPL",
+}
+
+struct RustPrint;
+
+impl kernel::Module for RustPrint {
+ fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust printing macros sample (init)\n");
+
+ pr_emerg!("Emergency message (level 0) without args\n");
+ pr_alert!("Alert message (level 1) without args\n");
+ pr_crit!("Critical message (level 2) without args\n");
+ pr_err!("Error message (level 3) without args\n");
+ pr_warn!("Warning message (level 4) without args\n");
+ pr_notice!("Notice message (level 5) without args\n");
+ pr_info!("Info message (level 6) without args\n");
+
+ pr_info!("A line that");
+ pr_cont!(" is continued");
+ pr_cont!(" without args\n");
+
+ pr_emerg!("{} message (level {}) with args\n", "Emergency", 0);
+ pr_alert!("{} message (level {}) with args\n", "Alert", 1);
+ pr_crit!("{} message (level {}) with args\n", "Critical", 2);
+ pr_err!("{} message (level {}) with args\n", "Error", 3);
+ pr_warn!("{} message (level {}) with args\n", "Warning", 4);
+ pr_notice!("{} message (level {}) with args\n", "Notice", 5);
+ pr_info!("{} message (level {}) with args\n", "Info", 6);
+
+ pr_info!("A {} that", "line");
+ pr_cont!(" is {}", "continued");
+ pr_cont!(" with {}\n", "args");
+
+ Ok(RustPrint)
+ }
+}
+
+impl Drop for RustPrint {
+ fn drop(&mut self) {
+ pr_info!("Rust printing macros sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_random.rs b/samples/rust/rust_random.rs
new file mode 100644
index 000000000000..771c7a940b3d
--- /dev/null
+++ b/samples/rust/rust_random.rs
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust random device.
+//!
+//! Adapted from Alex Gaynor's original available at
+//! <https://github.com/alex/just-use/blob/master/src/lib.rs>.
+
+use kernel::{
+ file::{self, File},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ prelude::*,
+};
+
+module_misc_device! {
+ type: RandomFile,
+ name: b"rust_random",
+ author: b"Rust for Linux Contributors",
+ description: b"Just use /dev/urandom: Now with early-boot safety",
+ license: b"GPL",
+}
+
+struct RandomFile;
+
+#[vtable]
+impl file::Operations for RandomFile {
+ fn open(_data: &(), _file: &File) -> Result {
+ Ok(())
+ }
+
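+ // Fill the caller's buffer in chunks of up to 256 bytes, using the
+ // blocking or non-blocking `getrandom` variant depending on `O_NONBLOCK`.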
+ fn read(_this: (), file: &File, buf: &mut impl IoBufferWriter, _: u64) -> Result<usize> {
+ let total_len = buf.len();
+ let mut chunkbuf = [0; 256];
+
+ while !buf.is_empty() {
+ let len = chunkbuf.len().min(buf.len());
+ let chunk = &mut chunkbuf[0..len];
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+
+ if blocking {
+ kernel::random::getrandom(chunk)?;
+ } else {
+ kernel::random::getrandom_nonblock(chunk)?;
+ }
+ buf.write_slice(chunk)?;
+ }
+ Ok(total_len)
+ }
+
+ fn write(_this: (), _file: &File, buf: &mut impl IoBufferReader, _: u64) -> Result<usize> {
+ let total_len = buf.len();
+ let mut chunkbuf = [0; 256];
+ while !buf.is_empty() {
+ let len = chunkbuf.len().min(buf.len());
+ let chunk = &mut chunkbuf[0..len];
+ buf.read_slice(chunk)?;
+ kernel::random::add_randomness(chunk);
+ }
+ Ok(total_len)
+ }
+}
diff --git a/samples/rust/rust_selftests.rs b/samples/rust/rust_selftests.rs
new file mode 100644
index 000000000000..965c48fd0e29
--- /dev/null
+++ b/samples/rust/rust_selftests.rs
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Self test cases for Rust.
+
+use kernel::prelude::*;
+// Keep the `use` for a test in its test function. Module-level `use`s are only for the test
+// framework.
+
+module! {
+ type: RustSelftests,
+ name: b"rust_selftests",
+ author: b"Rust for Linux Contributors",
+ description: b"Self test cases for Rust",
+ license: b"GPL",
+}
+
+struct RustSelftests;
+
+/// A summary of testing.
+///
+/// A test can
+///
+/// * pass (successfully), or
+/// * fail (without hitting any error), or
+/// * hit an error (interrupted).
+///
+/// This is the type that differentiates the first two (pass and fail) cases.
+///
+/// When a test hits an error, the test function should skip and return the error. Note that this
+/// doesn't mean the test fails; for example, if the system doesn't have enough memory for
+/// testing, the test function may return an `Err(ENOMEM)` and skip.
+#[allow(dead_code)]
+enum TestSummary {
+ Pass,
+ Fail,
+}
+
+use TestSummary::Fail;
+use TestSummary::Pass;
+
+macro_rules! do_tests {
+ ($($name:ident),*) => {
+ let mut total = 0;
+ let mut pass = 0;
+ let mut fail = 0;
+
+ $({
+ total += 1;
+
+ match $name() {
+ Ok(Pass) => {
+ pass += 1;
+ pr_info!("{} passed!", stringify!($name));
+ },
+ Ok(Fail) => {
+ fail += 1;
+ pr_info!("{} failed!", stringify!($name));
+ },
+ Err(err) => {
+ pr_info!("{} hit error {:?}", stringify!($name), err);
+ }
+ }
+ })*
+
+ pr_info!("{} tests run, {} passed, {} failed, {} hit errors\n",
+ total, pass, fail, total - pass - fail);
+
+ if total == pass {
+ pr_info!("All tests passed. Congratulations!\n");
+ }
+ }
+}
+
+/// An example of a test.
+#[allow(dead_code)]
+fn test_example() -> Result<TestSummary> {
+ // `use` declarations for the test can be put here, e.g. `use foo::bar;`.
+
+ // Always pass.
+ Ok(Pass)
+}
+
+impl kernel::Module for RustSelftests {
+ fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust self tests (init)\n");
+
+ do_tests! {
+ test_example // TODO: Remove when there is at least a real test.
+ };
+
+ Ok(RustSelftests)
+ }
+}
+
+impl Drop for RustSelftests {
+ fn drop(&mut self) {
+ pr_info!("Rust self tests (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_semaphore.rs b/samples/rust/rust_semaphore.rs
new file mode 100644
index 000000000000..e91f82a6abfb
--- /dev/null
+++ b/samples/rust/rust_semaphore.rs
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust semaphore sample.
+//!
+//! A counting semaphore that can be used by userspace.
+//!
+//! The count is incremented by writes to the device. A write of `n` bytes results in an increment
+//! of `n`. It is decremented by reads; each read results in the count being decremented by 1. If
+//! the count is already zero, a read will block until another write increments it.
+//!
+//! This can be used from user space, for example from the shell, as follows (assuming a node called
+//! `semaphore`): `cat semaphore` decrements the count by 1 (waiting for it to become non-zero
+//! before decrementing); `echo -n 123 > semaphore` increments the semaphore by 3, potentially
+//! unblocking up to 3 blocked readers.
+
+use core::sync::atomic::{AtomicU64, Ordering};
+use kernel::{
+ condvar_init,
+ file::{self, File, IoctlCommand, IoctlHandler},
+ io_buffer::{IoBufferReader, IoBufferWriter},
+ miscdev::Registration,
+ mutex_init,
+ prelude::*,
+ sync::{CondVar, Mutex, Ref, UniqueRef},
+ user_ptr::{UserSlicePtrReader, UserSlicePtrWriter},
+};
+
+module! {
+ type: RustSemaphore,
+ name: b"rust_semaphore",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust semaphore sample",
+ license: b"GPL",
+}
+
+struct SemaphoreInner {
+ count: usize,
+ max_seen: usize,
+}
+
+struct Semaphore {
+ changed: CondVar,
+ inner: Mutex<SemaphoreInner>,
+}
+
+struct FileState {
+ read_count: AtomicU64,
+ shared: Ref<Semaphore>,
+}
+
+impl FileState {
+ fn consume(&self) -> Result {
+ let mut inner = self.shared.inner.lock();
+ while inner.count == 0 {
+ if self.shared.changed.wait(&mut inner) {
+ return Err(EINTR);
+ }
+ }
+ inner.count -= 1;
+ Ok(())
+ }
+}
+
+#[vtable]
+impl file::Operations for FileState {
+ type Data = Box<Self>;
+ type OpenData = Ref<Semaphore>;
+
+ fn open(shared: &Ref<Semaphore>, _file: &File) -> Result<Box<Self>> {
+ Ok(Box::try_new(Self {
+ read_count: AtomicU64::new(0),
+ shared: shared.clone(),
+ })?)
+ }
+
+ fn read(this: &Self, _: &File, data: &mut impl IoBufferWriter, offset: u64) -> Result<usize> {
+ if data.is_empty() || offset > 0 {
+ return Ok(0);
+ }
+ this.consume()?;
+ data.write_slice(&[0u8; 1])?;
+ this.read_count.fetch_add(1, Ordering::Relaxed);
+ Ok(1)
+ }
+
+ fn write(this: &Self, _: &File, data: &mut impl IoBufferReader, _offs: u64) -> Result<usize> {
+ {
+ let mut inner = this.shared.inner.lock();
+ inner.count = inner.count.saturating_add(data.len());
+ if inner.count > inner.max_seen {
+ inner.max_seen = inner.count;
+ }
+ }
+
+ this.shared.changed.notify_all();
+ Ok(data.len())
+ }
+
+ fn ioctl(this: &Self, file: &File, cmd: &mut IoctlCommand) -> Result<i32> {
+ cmd.dispatch::<Self>(this, file)
+ }
+}
+
+struct RustSemaphore {
+ _dev: Pin<Box<Registration<FileState>>>,
+}
+
+impl kernel::Module for RustSemaphore {
+ fn init(name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust semaphore sample (init)\n");
+
+ let mut sema = Pin::from(UniqueRef::try_new(Semaphore {
+ // SAFETY: `condvar_init!` is called below.
+ changed: unsafe { CondVar::new() },
+
+ // SAFETY: `mutex_init!` is called below.
+ inner: unsafe {
+ Mutex::new(SemaphoreInner {
+ count: 0,
+ max_seen: 0,
+ })
+ },
+ })?);
+
+ // SAFETY: `changed` is pinned when `sema` is.
+ let pinned = unsafe { sema.as_mut().map_unchecked_mut(|s| &mut s.changed) };
+ condvar_init!(pinned, "Semaphore::changed");
+
+ // SAFETY: `inner` is pinned when `sema` is.
+ let pinned = unsafe { sema.as_mut().map_unchecked_mut(|s| &mut s.inner) };
+ mutex_init!(pinned, "Semaphore::inner");
+
+ Ok(Self {
+ _dev: Registration::new_pinned(fmt!("{name}"), sema.into())?,
+ })
+ }
+}
+
+impl Drop for RustSemaphore {
+ fn drop(&mut self) {
+ pr_info!("Rust semaphore sample (exit)\n");
+ }
+}
+
+const IOCTL_GET_READ_COUNT: u32 = 0x80086301;
+const IOCTL_SET_READ_COUNT: u32 = 0x40086301;
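These magic numbers mirror the `_IOR('c', 1, u64)` and `_IOW('c', 1, u64)` definitions in the C counterpart below. As a sketch only (not part of the sample; the helper name is made up), the encoding can be reproduced and checked at compile time:

const fn linux_ioc(dir: u32, ty: u32, nr: u32, size: u32) -> u32 {
    // Field layout from <asm-generic/ioctl.h>: nr (8 bits), type (8 bits),
    // size (14 bits) and direction (2 bits), least to most significant.
    (dir << 30) | (size << 16) | (ty << 8) | nr
}

// _IOR('c', 1, u64): direction = read (2), type = 'c' (0x63), nr = 1, size = 8.
const _: () = assert!(linux_ioc(2, 0x63, 1, 8) == IOCTL_GET_READ_COUNT);
// _IOW('c', 1, u64): direction = write (1), everything else as above.
const _: () = assert!(linux_ioc(1, 0x63, 1, 8) == IOCTL_SET_READ_COUNT);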
+
+impl IoctlHandler for FileState {
+ type Target<'a> = &'a Self;
+
+ fn read(this: &Self, _: &File, cmd: u32, writer: &mut UserSlicePtrWriter) -> Result<i32> {
+ match cmd {
+ IOCTL_GET_READ_COUNT => {
+ writer.write(&this.read_count.load(Ordering::Relaxed))?;
+ Ok(0)
+ }
+ _ => Err(EINVAL),
+ }
+ }
+
+ fn write(this: &Self, _: &File, cmd: u32, reader: &mut UserSlicePtrReader) -> Result<i32> {
+ match cmd {
+ IOCTL_SET_READ_COUNT => {
+ this.read_count.store(reader.read()?, Ordering::Relaxed);
+ Ok(0)
+ }
+ _ => Err(EINVAL),
+ }
+ }
+}
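The doc comment at the top of this file describes the read/write semantics. A minimal user-space sketch (assuming the misc device appears as `/dev/semaphore`; standard library only, so the ioctls are left out):

use std::fs::{File, OpenOptions};
use std::io::{Read, Write};

fn main() -> std::io::Result<()> {
    // A 3-byte write increments the count by 3.
    let mut writer = OpenOptions::new().write(true).open("/dev/semaphore")?;
    writer.write_all(b"abc")?;

    // Each open-and-read decrements the count by 1 (like `cat semaphore`);
    // the sample serves one byte per open file before reporting EOF.
    for _ in 0..3 {
        let mut byte = [0u8; 1];
        File::open("/dev/semaphore")?.read_exact(&mut byte)?;
    }

    // A fourth read would block here until another write raises the count.
    Ok(())
}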
diff --git a/samples/rust/rust_semaphore_c.c b/samples/rust/rust_semaphore_c.c
new file mode 100644
index 000000000000..7672b0b4c105
--- /dev/null
+++ b/samples/rust/rust_semaphore_c.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rust semaphore sample (in C, for comparison)
+ *
+ * This is a C implementation of `rust_semaphore.rs`. Refer to the description
+ * in that file for details on the device.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/refcount.h>
+#include <linux/wait.h>
+
+#define IOCTL_GET_READ_COUNT _IOR('c', 1, u64)
+#define IOCTL_SET_READ_COUNT _IOW('c', 1, u64)
+
+struct semaphore_state {
+ struct kref ref;
+ struct miscdevice miscdev;
+ wait_queue_head_t changed;
+ struct mutex mutex;
+ size_t count;
+ size_t max_seen;
+};
+
+struct file_state {
+ atomic64_t read_count;
+ struct semaphore_state *shared;
+};
+
+static int semaphore_consume(struct semaphore_state *state)
+{
+ DEFINE_WAIT(wait);
+
+ mutex_lock(&state->mutex);
+ while (state->count == 0) {
+ prepare_to_wait(&state->changed, &wait, TASK_INTERRUPTIBLE);
+ mutex_unlock(&state->mutex);
+ schedule();
+ finish_wait(&state->changed, &wait);
+ if (signal_pending(current))
+ return -EINTR;
+ mutex_lock(&state->mutex);
+ }
+
+ state->count--;
+ mutex_unlock(&state->mutex);
+
+ return 0;
+}
+
+static int semaphore_open(struct inode *nodp, struct file *filp)
+{
+ struct semaphore_state *shared =
+ container_of(filp->private_data, struct semaphore_state, miscdev);
+ struct file_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ kref_get(&shared->ref);
+ state->shared = shared;
+ atomic64_set(&state->read_count, 0);
+
+ filp->private_data = state;
+
+ return 0;
+}
+
+static ssize_t semaphore_write(struct file *filp, const char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct file_state *state = filp->private_data;
+ struct semaphore_state *shared = state->shared;
+
+ mutex_lock(&shared->mutex);
+
+ shared->count += count;
+ if (shared->count < count)
+ shared->count = SIZE_MAX;
+
+ if (shared->count > shared->max_seen)
+ shared->max_seen = shared->count;
+
+ mutex_unlock(&shared->mutex);
+
+ wake_up_all(&shared->changed);
+
+ return count;
+}
+
+static ssize_t semaphore_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct file_state *state = filp->private_data;
+ char c = 0;
+ int ret;
+
+ if (count == 0 || *ppos > 0)
+ return 0;
+
+ ret = semaphore_consume(state->shared);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(buffer, &c, sizeof(c)))
+ return -EFAULT;
+
+ atomic64_add(1, &state->read_count);
+ *ppos += 1;
+ return 1;
+}
+
+static long semaphore_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct file_state *state = filp->private_data;
+ void __user *buffer = (void __user *)arg;
+ u64 value;
+
+ switch (cmd) {
+ case IOCTL_GET_READ_COUNT:
+ value = atomic64_read(&state->read_count);
+ if (copy_to_user(buffer, &value, sizeof(value)))
+ return -EFAULT;
+ return 0;
+ case IOCTL_SET_READ_COUNT:
+ if (copy_from_user(&value, buffer, sizeof(value)))
+ return -EFAULT;
+ atomic64_set(&state->read_count, value);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void semaphore_free(struct kref *kref)
+{
+ struct semaphore_state *device;
+
+ device = container_of(kref, struct semaphore_state, ref);
+ kfree(device);
+}
+
+static int semaphore_release(struct inode *nodp, struct file *filp)
+{
+ struct file_state *state = filp->private_data;
+
+ kref_put(&state->shared->ref, semaphore_free);
+ kfree(state);
+ return 0;
+}
+
+static const struct file_operations semaphore_fops = {
+ .owner = THIS_MODULE,
+ .open = semaphore_open,
+ .read = semaphore_read,
+ .write = semaphore_write,
+ .compat_ioctl = semaphore_ioctl,
+ .release = semaphore_release,
+};
+
+static struct semaphore_state *device;
+
+static int __init semaphore_init(void)
+{
+ int ret;
+ struct semaphore_state *state;
+
+ pr_info("Rust semaphore sample (in C, for comparison) (init)\n");
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->mutex);
+ kref_init(&state->ref);
+ init_waitqueue_head(&state->changed);
+
+ state->miscdev.fops = &semaphore_fops;
+ state->miscdev.minor = MISC_DYNAMIC_MINOR;
+ state->miscdev.name = "semaphore";
+
+ ret = misc_register(&state->miscdev);
+ if (ret < 0) {
+ kfree(state);
+ return ret;
+ }
+
+ device = state;
+
+ return 0;
+}
+
+static void __exit semaphore_exit(void)
+{
+ pr_info("Rust semaphore sample (in C, for comparison) (exit)\n");
+
+ misc_deregister(&device->miscdev);
+ kref_put(&device->ref, semaphore_free);
+}
+
+module_init(semaphore_init);
+module_exit(semaphore_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rust for Linux Contributors");
+MODULE_DESCRIPTION("Rust semaphore sample (in C, for comparison)");
diff --git a/samples/rust/rust_stack_probing.rs b/samples/rust/rust_stack_probing.rs
new file mode 100644
index 000000000000..1448fe8e1b56
--- /dev/null
+++ b/samples/rust/rust_stack_probing.rs
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust stack probing sample.
+
+use kernel::prelude::*;
+
+module! {
+ type: RustStackProbing,
+ name: b"rust_stack_probing",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust stack probing sample",
+ license: b"GPL",
+}
+
+struct RustStackProbing;
+
+impl kernel::Module for RustStackProbing {
+ fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust stack probing sample (init)\n");
+
+ // Including this large variable on the stack will trigger
+ // stack probing on the supported architectures.
+ // This verifies that stack probing does not lead to
+ // any errors when `__rust_probestack` needs to be linked in.
+ let x: [u64; 514] = core::hint::black_box([5; 514]);
+ pr_info!("Large array has length: {}\n", x.len());
+
+ Ok(RustStackProbing)
+ }
+}
+
+impl Drop for RustStackProbing {
+ fn drop(&mut self) {
+ pr_info!("Rust stack probing sample (exit)\n");
+ }
+}
diff --git a/samples/rust/rust_sync.rs b/samples/rust/rust_sync.rs
new file mode 100644
index 000000000000..46637ace2f7f
--- /dev/null
+++ b/samples/rust/rust_sync.rs
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Rust synchronisation primitives sample.
+
+use kernel::prelude::*;
+use kernel::{
+ condvar_init, mutex_init, spinlock_init,
+ sync::{CondVar, Mutex, SpinLock},
+};
+
+module! {
+ type: RustSync,
+ name: b"rust_sync",
+ author: b"Rust for Linux Contributors",
+ description: b"Rust synchronisation primitives sample",
+ license: b"GPL",
+}
+
+kernel::init_static_sync! {
+ static SAMPLE_MUTEX: Mutex<u32> = 10;
+ static SAMPLE_CONDVAR: CondVar;
+}
+
+struct RustSync;
+
+impl kernel::Module for RustSync {
+ fn init(_name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
+ pr_info!("Rust synchronisation primitives sample (init)\n");
+
+ // Test mutexes.
+ {
+ // SAFETY: `init` is called below.
+ let mut data = Pin::from(Box::try_new(unsafe { Mutex::new(0) })?);
+ mutex_init!(data.as_mut(), "RustSync::init::data1");
+ *data.lock() = 10;
+ pr_info!("Value: {}\n", *data.lock());
+
+ // SAFETY: `init` is called below.
+ let mut cv = Pin::from(Box::try_new(unsafe { CondVar::new() })?);
+ condvar_init!(cv.as_mut(), "RustSync::init::cv1");
+
+ {
+ let mut guard = data.lock();
+ while *guard != 10 {
+ let _ = cv.wait(&mut guard);
+ }
+ }
+ cv.notify_one();
+ cv.notify_all();
+ cv.free_waiters();
+ }
+
+ // Test static mutex + condvar.
+ *SAMPLE_MUTEX.lock() = 20;
+
+ {
+ let mut guard = SAMPLE_MUTEX.lock();
+ while *guard != 20 {
+ let _ = SAMPLE_CONDVAR.wait(&mut guard);
+ }
+ }
+
+ // Test spinlocks.
+ {
+ // SAFETY: `init` is called below.
+ let mut data = Pin::from(Box::try_new(unsafe { SpinLock::new(0) })?);
+ spinlock_init!(data.as_mut(), "RustSync::init::data2");
+ *data.lock() = 10;
+ pr_info!("Value: {}\n", *data.lock());
+
+ // SAFETY: `init` is called below.
+ let mut cv = Pin::from(Box::try_new(unsafe { CondVar::new() })?);
+ condvar_init!(cv.as_mut(), "RustSync::init::cv2");
+ {
+ let mut guard = data.lock();
+ while *guard != 10 {
+ let _ = cv.wait(&mut guard);
+ }
+ }
+ cv.notify_one();
+ cv.notify_all();
+ cv.free_waiters();
+ }
+
+ Ok(RustSync)
+ }
+}
+
+impl Drop for RustSync {
+ fn drop(&mut self) {
+ pr_info!("Rust synchronisation primitives sample (exit)\n");
+ }
+}
diff --git a/scripts/.gitignore b/scripts/.gitignore
index eed308bef604..b7aec8eb1bd4 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
/asn1_compiler
/bin2c
+/generate_rust_target
/insert-sys-cert
/kallsyms
/module.lds
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index a0ccceb22cf8..274125307ebd 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -36,12 +36,12 @@ ld-option = $(success,$(LD) -v $(1))
as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
# check if $(CC) and $(LD) exist
-$(error-if,$(failure,command -v $(CC)),compiler '$(CC)' not found)
+$(error-if,$(failure,command -v $(CC)),C compiler '$(CC)' not found)
$(error-if,$(failure,command -v $(LD)),linker '$(LD)' not found)
-# Get the compiler name, version, and error out if it is not supported.
+# Get the C compiler name, version, and error out if it is not supported.
cc-info := $(shell,$(srctree)/scripts/cc-version.sh $(CC))
-$(error-if,$(success,test -z "$(cc-info)"),Sorry$(comma) this compiler is not supported.)
+$(error-if,$(success,test -z "$(cc-info)"),Sorry$(comma) this C compiler is not supported.)
cc-name := $(shell,set -- $(cc-info) && echo $1)
cc-version := $(shell,set -- $(cc-info) && echo $2)
diff --git a/scripts/Makefile b/scripts/Makefile
index f084f08ed176..1575af84d557 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -10,6 +10,9 @@ hostprogs-always-$(CONFIG_BUILDTIME_TABLE_SORT) += sorttable
hostprogs-always-$(CONFIG_ASN1) += asn1_compiler
hostprogs-always-$(CONFIG_MODULE_SIG_FORMAT) += sign-file
hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
+hostprogs-always-$(CONFIG_RUST) += generate_rust_target
+
+generate_rust_target-rust := y
HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
HOSTLDLIBS_sorttable = -lpthread
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 784f46d41959..00a0cd8f6e9f 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -26,6 +26,7 @@ EXTRA_CPPFLAGS :=
EXTRA_LDFLAGS :=
asflags-y :=
ccflags-y :=
+rustflags-y :=
cppflags-y :=
ldflags-y :=
@@ -271,6 +272,65 @@ quiet_cmd_cc_lst_c = MKLST $@
$(obj)/%.lst: $(src)/%.c FORCE
$(call if_changed_dep,cc_lst_c)
+# Compile Rust sources (.rs)
+# ---------------------------------------------------------------------------
+
+rust_allowed_features := allocator_api,bench_black_box,core_ffi_c,generic_associated_types,const_ptr_offset_from,const_refs_to_cell
+
+rust_common_cmd = \
+ RUST_MODFILE=$(modfile) $(RUSTC_OR_CLIPPY) $(rust_flags) \
+ -Zallow-features=$(rust_allowed_features) \
+ -Zcrate-attr=no_std \
+ -Zcrate-attr='feature($(rust_allowed_features))' \
+ --extern alloc --extern kernel \
+ --crate-type rlib --out-dir $(obj) -L $(objtree)/rust/ \
+ --crate-name $(basename $(notdir $@))
+
+rust_handle_depfile = \
+ mv $(obj)/$(basename $(notdir $@)).d $(depfile); \
+ sed -i '/^\#/d' $(depfile)
+
+# `--emit=obj`, `--emit=asm` and `--emit=llvm-ir` imply a single codegen unit
+# will be used. We explicitly request `-Ccodegen-units=1` in any case, and
+# the compiler shows a warning if it is not 1. However, if we ever stop
+# requesting it explicitly and we start using some other `--emit` that does not
+# imply it (and for which codegen is performed), then we would be out of sync,
+# i.e. the outputs we would get for the different single targets (e.g. `.ll`)
+# would not match each other.
+
+quiet_cmd_rustc_o_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+ cmd_rustc_o_rs = \
+ $(rust_common_cmd) --emit=dep-info,obj $<; \
+ $(rust_handle_depfile)
+
+$(obj)/%.o: $(src)/%.rs FORCE
+ $(call if_changed_dep,rustc_o_rs)
+
+quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+ cmd_rustc_rsi_rs = \
+ $(rust_common_cmd) --emit=dep-info -Zunpretty=expanded $< >$@; \
+ command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) $@; \
+ $(rust_handle_depfile)
+
+$(obj)/%.rsi: $(src)/%.rs FORCE
+ $(call if_changed_dep,rustc_rsi_rs)
+
+quiet_cmd_rustc_s_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+ cmd_rustc_s_rs = \
+ $(rust_common_cmd) --emit=dep-info,asm $<; \
+ $(rust_handle_depfile)
+
+$(obj)/%.s: $(src)/%.rs FORCE
+ $(call if_changed_dep,rustc_s_rs)
+
+quiet_cmd_rustc_ll_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
+ cmd_rustc_ll_rs = \
+ $(rust_common_cmd) --emit=dep-info,llvm-ir $<; \
+ $(rust_handle_depfile)
+
+$(obj)/%.ll: $(src)/%.rs FORCE
+ $(call if_changed_dep,rustc_ll_rs)
+
# Compile assembler sources (.S)
# ---------------------------------------------------------------------------
diff --git a/scripts/Makefile.debug b/scripts/Makefile.debug
index 9f39b0130551..fe87389d52c0 100644
--- a/scripts/Makefile.debug
+++ b/scripts/Makefile.debug
@@ -1,4 +1,5 @@
DEBUG_CFLAGS :=
+DEBUG_RUSTFLAGS :=
ifdef CONFIG_DEBUG_INFO_SPLIT
DEBUG_CFLAGS += -gsplit-dwarf
@@ -10,6 +11,12 @@ ifndef CONFIG_AS_IS_LLVM
KBUILD_AFLAGS += -Wa,-gdwarf-2
endif
+ifdef CONFIG_DEBUG_INFO_REDUCED
+DEBUG_RUSTFLAGS += -Cdebuginfo=1
+else
+DEBUG_RUSTFLAGS += -Cdebuginfo=2
+endif
+
ifndef CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
dwarf-version-$(CONFIG_DEBUG_INFO_DWARF4) := 4
dwarf-version-$(CONFIG_DEBUG_INFO_DWARF5) := 5
@@ -31,3 +38,6 @@ endif
KBUILD_CFLAGS += $(DEBUG_CFLAGS)
export DEBUG_CFLAGS
+
+KBUILD_RUSTFLAGS += $(DEBUG_RUSTFLAGS)
+export DEBUG_RUSTFLAGS
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 278b4d6ac945..da133780b751 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -22,6 +22,8 @@ $(obj)/%.tab.c $(obj)/%.tab.h: $(src)/%.y FORCE
# to preprocess a data file.
#
# Both C and C++ are supported, but preferred language is C for such utilities.
+# Rust is also supported, but it may only be used in scenarios where a Rust
+# toolchain is already required to be available (e.g. when `CONFIG_RUST` is enabled).
#
# Sample syntax (see Documentation/kbuild/makefiles.rst for reference)
# hostprogs := bin2hex
@@ -37,15 +39,20 @@ $(obj)/%.tab.c $(obj)/%.tab.h: $(src)/%.y FORCE
# qconf-objs := menu.o
# Will compile qconf as a C++ program, and menu as a C program.
# They are linked as C++ code to the executable qconf
+#
+# hostprogs := target
+# target-rust := y
+# Will compile `target` as a Rust program, using `target.rs` as the crate root.
+# The crate may consist of several source files.
# C code
# Executables compiled from a single .c file
host-csingle := $(foreach m,$(hostprogs), \
- $(if $($(m)-objs)$($(m)-cxxobjs),,$(m)))
+ $(if $($(m)-objs)$($(m)-cxxobjs)$($(m)-rust),,$(m)))
# C executables linked based on several .o files
host-cmulti := $(foreach m,$(hostprogs),\
- $(if $($(m)-cxxobjs),,$(if $($(m)-objs),$(m))))
+ $(if $($(m)-cxxobjs)$($(m)-rust),,$(if $($(m)-objs),$(m))))
# Object (.o) files compiled from .c files
host-cobjs := $(sort $(foreach m,$(hostprogs),$($(m)-objs)))
@@ -58,11 +65,17 @@ host-cxxmulti := $(foreach m,$(hostprogs),$(if $($(m)-cxxobjs),$(m)))
# C++ Object (.o) files compiled from .cc files
host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
+# Rust code
+# Executables compiled from a single Rust crate (which may consist of
+# one or more .rs files)
+host-rust := $(foreach m,$(hostprogs),$(if $($(m)-rust),$(m)))
+
host-csingle := $(addprefix $(obj)/,$(host-csingle))
host-cmulti := $(addprefix $(obj)/,$(host-cmulti))
host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
+host-rust := $(addprefix $(obj)/,$(host-rust))
#####
# Handle options to gcc. Support building with separate output directory
@@ -71,6 +84,8 @@ _hostc_flags = $(KBUILD_HOSTCFLAGS) $(HOST_EXTRACFLAGS) \
$(HOSTCFLAGS_$(target-stem).o)
_hostcxx_flags = $(KBUILD_HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \
$(HOSTCXXFLAGS_$(target-stem).o)
+_hostrust_flags = $(KBUILD_HOSTRUSTFLAGS) $(HOST_EXTRARUSTFLAGS) \
+ $(HOSTRUSTFLAGS_$(target-stem))
# $(objtree)/$(obj) for including generated headers from checkin source files
ifeq ($(KBUILD_EXTMOD),)
@@ -82,6 +97,7 @@ endif
hostc_flags = -Wp,-MMD,$(depfile) $(_hostc_flags)
hostcxx_flags = -Wp,-MMD,$(depfile) $(_hostcxx_flags)
+hostrust_flags = $(_hostrust_flags)
#####
# Compile programs on the host
@@ -128,5 +144,17 @@ quiet_cmd_host-cxxobjs = HOSTCXX $@
$(host-cxxobjs): $(obj)/%.o: $(src)/%.cc FORCE
$(call if_changed_dep,host-cxxobjs)
+# Create executable from a single Rust crate (which may consist of
+# one or more `.rs` files)
+# host-rust -> Executable
+quiet_cmd_host-rust = HOSTRUSTC $@
+ cmd_host-rust = \
+ $(HOSTRUSTC) $(hostrust_flags) --emit=dep-info,link \
+ --out-dir=$(obj)/ $<; \
+ mv $(obj)/$(target-stem).d $(depfile); \
+ sed -i '/^\#/d' $(depfile)
+$(host-rust): $(obj)/%: $(src)/%.rs FORCE
+ $(call if_changed_dep,host-rust)
+
targets += $(host-csingle) $(host-cmulti) $(host-cobjs) \
- $(host-cxxmulti) $(host-cxxobjs)
+ $(host-cxxmulti) $(host-cxxobjs) $(host-rust)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 3fb6a99e78c4..c88b98b5dc44 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -8,6 +8,7 @@ ldflags-y += $(EXTRA_LDFLAGS)
# flags that take effect in current and sub directories
KBUILD_AFLAGS += $(subdir-asflags-y)
KBUILD_CFLAGS += $(subdir-ccflags-y)
+KBUILD_RUSTFLAGS += $(subdir-rustflags-y)
# Figure out what we need to build from the various variables
# ===========================================================================
@@ -128,6 +129,10 @@ _c_flags = $(filter-out $(CFLAGS_REMOVE_$(target-stem).o), \
$(filter-out $(ccflags-remove-y), \
$(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(ccflags-y)) \
$(CFLAGS_$(target-stem).o))
+_rust_flags = $(filter-out $(RUSTFLAGS_REMOVE_$(target-stem).o), \
+ $(filter-out $(rustflags-remove-y), \
+ $(KBUILD_RUSTFLAGS) $(rustflags-y)) \
+ $(RUSTFLAGS_$(target-stem).o))
_a_flags = $(filter-out $(AFLAGS_REMOVE_$(target-stem).o), \
$(filter-out $(asflags-remove-y), \
$(KBUILD_CPPFLAGS) $(KBUILD_AFLAGS) $(asflags-y)) \
@@ -202,6 +207,11 @@ modkern_cflags = \
$(KBUILD_CFLAGS_MODULE) $(CFLAGS_MODULE), \
$(KBUILD_CFLAGS_KERNEL) $(CFLAGS_KERNEL) $(modfile_flags))
+modkern_rustflags = \
+ $(if $(part-of-module), \
+ $(KBUILD_RUSTFLAGS_MODULE) $(RUSTFLAGS_MODULE), \
+ $(KBUILD_RUSTFLAGS_KERNEL) $(RUSTFLAGS_KERNEL))
+
modkern_aflags = $(if $(part-of-module), \
$(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE), \
$(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL))
@@ -211,6 +221,8 @@ c_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(_c_flags) $(modkern_cflags) \
$(basename_flags) $(modname_flags)
+rust_flags = $(_rust_flags) $(modkern_rustflags) @$(objtree)/include/generated/rustc_cfg
+
a_flags = -Wp,-MMD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \
$(_a_flags) $(modkern_aflags)
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index 35100e981f4a..9a1fa6aa30fe 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -39,11 +39,13 @@ quiet_cmd_ld_ko_o = LD [M] $@
quiet_cmd_btf_ko = BTF [M] $@
cmd_btf_ko = \
- if [ -f vmlinux ]; then \
+ if [ ! -f vmlinux ]; then \
+ printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
+ elif [ -n "$(CONFIG_RUST)" ] && $(srctree)/scripts/is_rust_module.sh $@; then \
+ printf "Skipping BTF generation for %s because it's a Rust module\n" $@ 1>&2; \
+ else \
LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \
$(RESOLVE_BTFIDS) -b vmlinux $@; \
- else \
- printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
fi;
# Same as newer-prereqs, but allows to exclude specified extra dependencies
diff --git a/scripts/cc-version.sh b/scripts/cc-version.sh
index f1952c522466..2401c86fcf53 100755
--- a/scripts/cc-version.sh
+++ b/scripts/cc-version.sh
@@ -1,13 +1,13 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
#
-# Print the compiler name and its version in a 5 or 6-digit form.
+# Print the C compiler name and its version in a 5 or 6-digit form.
# Also, perform the minimum version check.
set -e
-# Print the compiler name and some version components.
-get_compiler_info()
+# Print the C compiler name and some version components.
+get_c_compiler_info()
{
cat <<- EOF | "$@" -E -P -x c - 2>/dev/null
#if defined(__clang__)
@@ -32,7 +32,7 @@ get_canonical_version()
# $@ instead of $1 because multiple words might be given, e.g. CC="ccache gcc".
orig_args="$@"
-set -- $(get_compiler_info "$@")
+set -- $(get_c_compiler_info "$@")
name=$1
@@ -52,7 +52,7 @@ ICC)
min_version=$($min_tool_version icc)
;;
*)
- echo "$orig_args: unknown compiler" >&2
+ echo "$orig_args: unknown C compiler" >&2
exit 1
;;
esac
@@ -62,7 +62,7 @@ min_cversion=$(get_canonical_version $min_version)
if [ "$cversion" -lt "$min_cversion" ]; then
echo >&2 "***"
- echo >&2 "*** Compiler is too old."
+ echo >&2 "*** C compiler is too old."
echo >&2 "*** Your $name version: $version"
echo >&2 "*** Minimum $name version: $min_version"
echo >&2 "***"
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 503e8abbb2c1..bd0025d77bcf 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3615,7 +3615,7 @@ sub process {
my $comment = "";
if ($realfile =~ /\.(h|s|S)$/) {
$comment = '/*';
- } elsif ($realfile =~ /\.(c|dts|dtsi)$/) {
+ } elsif ($realfile =~ /\.(c|rs|dts|dtsi)$/) {
$comment = '//';
} elsif (($checklicenseline == 2) || $realfile =~ /\.(sh|pl|py|awk|tc|yaml)$/) {
$comment = '#';
@@ -3663,7 +3663,7 @@ sub process {
}
# check we are in a valid source file if not then ignore this hunk
- next if ($realfile !~ /\.(h|c|s|S|sh|dtsi|dts)$/);
+ next if ($realfile !~ /\.(h|c|rs|s|S|sh|dtsi|dts)$/);
# check for using SPDX-License-Identifier on the wrong line number
if ($realline != $checklicenseline &&
@@ -6782,15 +6782,19 @@ sub process {
}
if ($bad_specifier ne "") {
my $stat_real = get_stat_real($linenr, $lc);
+ my $msg_level = \&WARN;
my $ext_type = "Invalid";
my $use = "";
if ($bad_specifier =~ /p[Ff]/) {
$use = " - use %pS instead";
$use =~ s/pS/ps/ if ($bad_specifier =~ /pf/);
+ } elsif ($bad_specifier =~ /pA/) {
+ $use = " - '%pA' is only intended to be used from Rust code";
+ $msg_level = \&ERROR;
}
- WARN("VSPRINTF_POINTER_EXTENSION",
- "$ext_type vsprintf pointer extension '$bad_specifier'$use\n" . "$here\n$stat_real\n");
+ &{$msg_level}("VSPRINTF_POINTER_EXTENSION",
+ "$ext_type vsprintf pointer extension '$bad_specifier'$use\n" . "$here\n$stat_real\n");
}
}
}
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 7075e26ab2c4..564c5632e1a2 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -8,6 +8,14 @@ usage() {
echo " $0 -r <release> | <vmlinux> [<base path>|auto] [<modules path>]"
}
+# Try to find a Rust demangler
+if type llvm-cxxfilt >/dev/null 2>&1 ; then
+ cppfilt=llvm-cxxfilt
+elif type c++filt >/dev/null 2>&1 ; then
+ cppfilt=c++filt
+ cppfilt_opts=-i
+fi
+
if [[ $1 == "-r" ]] ; then
vmlinux=""
basepath="auto"
@@ -180,6 +188,12 @@ parse_symbol() {
# In the case of inlines, move everything to same line
code=${code//$'\n'/' '}
+ # Demangle if the name looks like a Rust symbol and if
+ # we got a Rust demangler
+ if [[ $name =~ ^_R && $cppfilt != "" ]] ; then
+ name=$("$cppfilt" "$cppfilt_opts" "$name")
+ fi
+
# Replace old address with pretty line numbers
symbol="$segment$name ($code)"
}
diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py
new file mode 100755
index 000000000000..ecc7ea9a4dcf
--- /dev/null
+++ b/scripts/generate_rust_analyzer.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+"""generate_rust_analyzer - Generates the `rust-project.json` file for `rust-analyzer`.
+"""
+
+import argparse
+import json
+import logging
+import pathlib
+import sys
+
+def generate_crates(srctree, objtree, sysroot_src):
+ # Generate the configuration list.
+ cfg = []
+ with open(objtree / "include" / "generated" / "rustc_cfg") as fd:
+ for line in fd:
+ line = line.replace("--cfg=", "")
+ line = line.replace("\n", "")
+ cfg.append(line)
+
+ # Now fill the crates list -- dependencies need to come first.
+ #
+ # Avoid O(n^2) iterations by keeping a map of indexes.
+ crates = []
+ crates_indexes = {}
+
+ def append_crate(display_name, root_module, deps, cfg=[], is_workspace_member=True, is_proc_macro=False):
+ crates_indexes[display_name] = len(crates)
+ crates.append({
+ "display_name": display_name,
+ "root_module": str(root_module),
+ "is_workspace_member": is_workspace_member,
+ "is_proc_macro": is_proc_macro,
+ "deps": [{"crate": crates_indexes[dep], "name": dep} for dep in deps],
+ "cfg": cfg,
+ "edition": "2021",
+ "env": {
+ "RUST_MODFILE": "This is only for rust-analyzer"
+ }
+ })
+
+ # First, the ones in `rust/` since they are a bit special.
+ append_crate(
+ "core",
+ sysroot_src / "core" / "src" / "lib.rs",
+ [],
+ is_workspace_member=False,
+ )
+
+ append_crate(
+ "compiler_builtins",
+ srctree / "rust" / "compiler_builtins.rs",
+ [],
+ )
+
+ append_crate(
+ "alloc",
+ srctree / "rust" / "alloc" / "lib.rs",
+ ["core", "compiler_builtins"],
+ )
+
+ append_crate(
+ "macros",
+ srctree / "rust" / "macros" / "lib.rs",
+ [],
+ is_proc_macro=True,
+ )
+ crates[-1]["proc_macro_dylib_path"] = "rust/libmacros.so"
+
+ append_crate(
+ "build_error",
+ srctree / "rust" / "build_error.rs",
+ ["core", "compiler_builtins"],
+ )
+
+ append_crate(
+ "bindings",
+ srctree / "rust"/ "bindings" / "lib.rs",
+ ["core"],
+ cfg=cfg,
+ )
+ crates[-1]["env"]["OBJTREE"] = str(objtree.resolve(True))
+
+ append_crate(
+ "kernel",
+ srctree / "rust" / "kernel" / "lib.rs",
+ ["core", "alloc", "macros", "build_error", "bindings"],
+ cfg=cfg,
+ )
+ crates[-1]["source"] = {
+ "include_dirs": [
+ str(srctree / "rust" / "kernel"),
+ str(objtree / "rust")
+ ],
+ "exclude_dirs": [],
+ }
+
+ # Then, the rest outside of `rust/`.
+ #
+ # We explicitly mention the top-level folders we want to cover.
+ for folder in ("samples", "drivers"):
+ for path in (srctree / folder).rglob("*.rs"):
+ logging.info("Checking %s", path)
+ name = path.name.replace(".rs", "")
+
+ # Skip those that are not crate roots.
+ if f"{name}.o" not in open(path.parent / "Makefile").read():
+ continue
+
+ logging.info("Adding %s", name)
+ append_crate(
+ name,
+ path,
+ ["core", "alloc", "kernel"],
+ cfg=cfg,
+ )
+
+ return crates
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--verbose', '-v', action='store_true')
+ parser.add_argument("srctree", type=pathlib.Path)
+ parser.add_argument("objtree", type=pathlib.Path)
+ parser.add_argument("sysroot_src", type=pathlib.Path)
+ args = parser.parse_args()
+
+ logging.basicConfig(
+ format="[%(asctime)s] [%(levelname)s] %(message)s",
+ level=logging.INFO if args.verbose else logging.WARNING
+ )
+
+ rust_project = {
+ "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src),
+ "sysroot_src": str(args.sysroot_src),
+ }
+
+ json.dump(rust_project, sys.stdout, sort_keys=True, indent=4)
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
new file mode 100644
index 000000000000..0a1ba95d74e7
--- /dev/null
+++ b/scripts/generate_rust_target.rs
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! The custom target specification file generator for `rustc`.
+//!
+//! To configure a target from scratch, a JSON-encoded file has to be passed
+//! to `rustc` (introduced in [RFC 131]). These options and the file itself are
+//! unstable. Eventually, `rustc` should provide a way to do this in a stable
+//! manner. For instance, via command-line arguments. Therefore, this file
+//! should avoid using keys which can be set via `-C` or `-Z` options.
+//!
+//! [RFC 131]: https://rust-lang.github.io/rfcs/0131-target-specification.html
+
+use std::{
+ collections::HashMap,
+ fmt::{Display, Formatter, Result},
+ io::BufRead,
+};
+
+enum Value {
+ Boolean(bool),
+ Number(i32),
+ String(String),
+ Object(Object),
+}
+
+type Object = Vec<(String, Value)>;
+
+/// Minimal "almost JSON" generator (e.g. no `null`s, no arrays, no escaping),
+/// enough for this purpose.
+impl Display for Value {
+ fn fmt(&self, formatter: &mut Formatter<'_>) -> Result {
+ match self {
+ Value::Boolean(boolean) => write!(formatter, "{}", boolean),
+ Value::Number(number) => write!(formatter, "{}", number),
+ Value::String(string) => write!(formatter, "\"{}\"", string),
+ Value::Object(object) => {
+ formatter.write_str("{")?;
+ if let [ref rest @ .., ref last] = object[..] {
+ for (key, value) in rest {
+ write!(formatter, "\"{}\": {},", key, value)?;
+ }
+ write!(formatter, "\"{}\": {}", last.0, last.1)?;
+ }
+ formatter.write_str("}")
+ }
+ }
+ }
+}
+
+struct TargetSpec(Object);
+
+impl TargetSpec {
+ fn new() -> TargetSpec {
+ TargetSpec(Vec::new())
+ }
+}
+
+trait Push<T> {
+ fn push(&mut self, key: &str, value: T);
+}
+
+impl Push<bool> for TargetSpec {
+ fn push(&mut self, key: &str, value: bool) {
+ self.0.push((key.to_string(), Value::Boolean(value)));
+ }
+}
+
+impl Push<i32> for TargetSpec {
+ fn push(&mut self, key: &str, value: i32) {
+ self.0.push((key.to_string(), Value::Number(value)));
+ }
+}
+
+impl Push<String> for TargetSpec {
+ fn push(&mut self, key: &str, value: String) {
+ self.0.push((key.to_string(), Value::String(value)));
+ }
+}
+
+impl Push<&str> for TargetSpec {
+ fn push(&mut self, key: &str, value: &str) {
+ self.push(key, value.to_string());
+ }
+}
+
+impl Push<Object> for TargetSpec {
+ fn push(&mut self, key: &str, value: Object) {
+ self.0.push((key.to_string(), Value::Object(value)));
+ }
+}
+
+impl Display for TargetSpec {
+ fn fmt(&self, formatter: &mut Formatter<'_>) -> Result {
+ // We add some newlines for clarity.
+ formatter.write_str("{\n")?;
+ if let [ref rest @ .., ref last] = self.0[..] {
+ for (key, value) in rest {
+ write!(formatter, " \"{}\": {},\n", key, value)?;
+ }
+ write!(formatter, " \"{}\": {}\n", last.0, last.1)?;
+ }
+ formatter.write_str("}")
+ }
+}
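As a usage sketch (illustrative only, not part of the generator), pushing a couple of keys and printing the spec shows the "almost JSON" format produced by the two `Display` impls above:

fn example() {
    let mut ts = TargetSpec::new();
    ts.push("arch", "x86_64");
    ts.push("max-atomic-width", 64);
    // Prints:
    // {
    //     "arch": "x86_64",
    //     "max-atomic-width": 64
    // }
    println!("{}", ts);
}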
+
+struct KernelConfig(HashMap<String, String>);
+
+impl KernelConfig {
+ /// Parses `include/config/auto.conf` from `stdin`.
+ fn from_stdin() -> KernelConfig {
+ let mut result = HashMap::new();
+
+ let stdin = std::io::stdin();
+ let mut handle = stdin.lock();
+ let mut line = String::new();
+
+ loop {
+ line.clear();
+
+ if handle.read_line(&mut line).unwrap() == 0 {
+ break;
+ }
+
+ if line.starts_with('#') {
+ continue;
+ }
+
+ let (key, value) = line.split_once('=').expect("Missing `=` in line.");
+ result.insert(key.to_string(), value.trim_end_matches('\n').to_string());
+ }
+
+ KernelConfig(result)
+ }
+
+ /// Does the option exist in the configuration (any value)?
+ ///
+ /// The argument must be passed without the `CONFIG_` prefix.
+ /// This avoids repetition and it also avoids `fixdep` making us
+ /// depend on it.
+ fn has(&self, option: &str) -> bool {
+ let option = "CONFIG_".to_owned() + option;
+ self.0.contains_key(&option)
+ }
+}
+
+fn main() {
+ let cfg = KernelConfig::from_stdin();
+ let mut ts = TargetSpec::new();
+
+ // `llvm-target`s are taken from `scripts/Makefile.clang`.
+ if cfg.has("ARM") {
+ ts.push("arch", "arm");
+ ts.push(
+ "data-layout",
+ "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64",
+ );
+ ts.push("features", "+strict-align,+v6");
+ ts.push("llvm-target", "arm-linux-gnueabi");
+ ts.push("max-atomic-width", 64);
+ ts.push("target-mcount", "\\u0001__gnu_mcount_nc");
+ ts.push("target-pointer-width", "32");
+ } else if cfg.has("ARM64") {
+ ts.push("arch", "aarch64");
+ ts.push(
+ "data-layout",
+ "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128",
+ );
+ ts.push("disable-redzone", true);
+ ts.push("features", "+strict-align,-neon,-fp-armv8");
+ ts.push("llvm-target", "aarch64-linux-gnu");
+ ts.push("max-atomic-width", 128);
+ ts.push("target-pointer-width", "64");
+ } else if cfg.has("PPC") {
+ ts.push("arch", "powerpc64");
+ ts.push("code-model", "large");
+ ts.push("data-layout", "e-m:e-i64:64-n32:64");
+ ts.push("features", "-altivec,-vsx,-hard-float");
+ ts.push("llvm-target", "powerpc64le-linux-gnu");
+ ts.push("max-atomic-width", 64);
+ ts.push("target-mcount", "_mcount");
+ ts.push("target-pointer-width", "64");
+ } else if cfg.has("RISCV") {
+ if cfg.has("64BIT") {
+ ts.push("arch", "riscv64");
+ ts.push("data-layout", "e-m:e-p:64:64-i64:64-i128:128-n64-S128");
+ ts.push("llvm-target", "riscv64-linux-gnu");
+ ts.push("target-pointer-width", "64");
+ } else {
+ ts.push("arch", "riscv32");
+ ts.push("data-layout", "e-m:e-p:32:32-i64:64-n32-S128");
+ ts.push("llvm-target", "riscv32-linux-gnu");
+ ts.push("target-pointer-width", "32");
+ }
+ ts.push("code-model", "medium");
+ ts.push("disable-redzone", true);
+ let mut features = "+m,+a".to_string();
+ if cfg.has("RISCV_ISA_C") {
+ features += ",+c";
+ }
+ ts.push("features", features);
+ } else if cfg.has("X86_64") {
+ ts.push("arch", "x86_64");
+ ts.push(
+ "data-layout",
+ "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
+ );
+ let mut features = "-3dnow,-3dnowa,-mmx,+soft-float".to_string();
+ if cfg.has("RETPOLINE") {
+ features += ",+retpoline-external-thunk";
+ }
+ ts.push("features", features);
+ ts.push("llvm-target", "x86_64-linux-gnu");
+ ts.push("target-pointer-width", "64");
+ } else {
+ panic!("Unsupported architecture");
+ }
+
+ ts.push("emit-debug-gdb-scripts", false);
+ ts.push("frame-pointer", "may-omit");
+ ts.push(
+ "stack-probes",
+ vec![("kind".to_string(), Value::String("none".to_string()))],
+ );
+
+ // Everything else is LE, whether `CPU_LITTLE_ENDIAN` is declared or not
+ // (e.g. x86). It is also `rustc`'s default.
+ if cfg.has("CPU_BIG_ENDIAN") {
+ ts.push("target-endian", "big");
+ }
+
+ println!("{}", ts);
+}
diff --git a/scripts/is_rust_module.sh b/scripts/is_rust_module.sh
new file mode 100755
index 000000000000..28b3831a7593
--- /dev/null
+++ b/scripts/is_rust_module.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# is_rust_module.sh module.ko
+#
+# Returns `0` if `module.ko` is a Rust module, `1` otherwise.
+
+set -e
+
+# Using the `16_` prefix ensures other symbols with the same substring
+# are not picked up (even if it would be unlikely). The last part is
+# used just in case LLVM decides to use the `.` suffix.
+#
+# In the future, checking for the `.comment` section may be another
+# option, see https://github.com/rust-lang/rust/pull/97550.
+${NM} "$*" | grep -qE '^[0-9a-fA-F]+ r _R[^[:space:]]+16___IS_RUST_MODULE[^[:space:]]*$'
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index f18e6dfc68c5..3b64dda7e4cc 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -27,7 +27,17 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
-#define KSYM_NAME_LEN 128
+#define _stringify_1(x) #x
+#define _stringify(x) _stringify_1(x)
+
+#define KSYM_NAME_LEN 512
+
+/* A substantially bigger size than the current maximum. */
+#define KSYM_NAME_LEN_BUFFER 2048
+_Static_assert(
+ KSYM_NAME_LEN_BUFFER == KSYM_NAME_LEN * 4,
+ "Please keep KSYM_NAME_LEN_BUFFER in sync with KSYM_NAME_LEN"
+);
struct sym_entry {
unsigned long long addr;
@@ -198,15 +208,15 @@ static void check_symbol_range(const char *sym, unsigned long long addr,
static struct sym_entry *read_symbol(FILE *in)
{
- char name[500], type;
+ char name[KSYM_NAME_LEN_BUFFER+1], type;
unsigned long long addr;
unsigned int len;
struct sym_entry *sym;
int rc;
- rc = fscanf(in, "%llx %c %499s\n", &addr, &type, name);
+ rc = fscanf(in, "%llx %c %" _stringify(KSYM_NAME_LEN_BUFFER) "s\n", &addr, &type, name);
if (rc != 3) {
- if (rc != EOF && fgets(name, 500, in) == NULL)
+ if (rc != EOF && fgets(name, sizeof(name), in) == NULL)
fprintf(stderr, "Read error or end of file.\n");
return NULL;
}
@@ -471,12 +481,35 @@ static void write_src(void)
if ((i & 0xFF) == 0)
markers[i >> 8] = off;
- printf("\t.byte 0x%02x", table[i]->len);
+ /* There cannot be any symbol of length zero. */
+ if (table[i]->len == 0) {
+ fprintf(stderr, "kallsyms failure: "
+ "unexpected zero symbol length\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Only lengths that fit in up-to-two-byte ULEB128 are supported. */
+ if (table[i]->len > 0x3FFF) {
+ fprintf(stderr, "kallsyms failure: "
+ "unexpected huge symbol length\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Encode length with ULEB128. */
+ if (table[i]->len <= 0x7F) {
+ /* Most symbols use a single byte for the length. */
+ printf("\t.byte 0x%02x", table[i]->len);
+ off += table[i]->len + 1;
+ } else {
+ /* "Big" symbols use two bytes. */
+ printf("\t.byte 0x%02x, 0x%02x",
+ (table[i]->len & 0x7F) | 0x80,
+ (table[i]->len >> 7) & 0x7F);
+ off += table[i]->len + 2;
+ }
for (k = 0; k < table[i]->len; k++)
printf(", 0x%02x", table[i]->sym[k]);
printf("\n");
-
- off += table[i]->len + 1;
}
printf("\n");
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index c4340c90e172..b7c9f1dd5e42 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -216,6 +216,13 @@ static const char *conf_get_autoheader_name(void)
return name ? name : "include/generated/autoconf.h";
}
+static const char *conf_get_rustccfg_name(void)
+{
+ char *name = getenv("KCONFIG_RUSTCCFG");
+
+ return name ? name : "include/generated/rustc_cfg";
+}
+
static int conf_set_sym_val(struct symbol *sym, int def, int def_flags, char *p)
{
char *p2;
@@ -605,6 +612,9 @@ static const struct comment_style comment_style_c = {
static void conf_write_heading(FILE *fp, const struct comment_style *cs)
{
+ if (!cs)
+ return;
+
fprintf(fp, "%s\n", cs->prefix);
fprintf(fp, "%s Automatically generated file; DO NOT EDIT.\n",
@@ -745,6 +755,65 @@ static void print_symbol_for_c(FILE *fp, struct symbol *sym)
free(escaped);
}
+static void print_symbol_for_rustccfg(FILE *fp, struct symbol *sym)
+{
+ const char *val;
+ const char *val_prefix = "";
+ char *val_prefixed = NULL;
+ size_t val_prefixed_len;
+ char *escaped = NULL;
+
+ if (sym->type == S_UNKNOWN)
+ return;
+
+ val = sym_get_string_value(sym);
+
+ switch (sym->type) {
+ case S_BOOLEAN:
+ case S_TRISTATE:
+ /*
+ * We do not care about disabled options, i.e. there is no need
+ * for the "comment" lines that other printers emit for them.
+ */
+ if (*val == 'n')
+ return;
+
+ /*
+ * To have similar functionality to the C macro `IS_ENABLED()`
+ * we provide an empty `--cfg CONFIG_X` here in both `y`
+ * and `m` cases.
+ *
+ * Then, the common `fprintf()` below will also give us
+ * a `--cfg CONFIG_X="y"` or `--cfg CONFIG_X="m"`, which can
+ * be used as the equivalent of `IS_BUILTIN()`/`IS_MODULE()`.
+ */
+ fprintf(fp, "--cfg=%s%s\n", CONFIG_, sym->name);
+ break;
+ case S_HEX:
+ if (val[0] != '0' || (val[1] != 'x' && val[1] != 'X'))
+ val_prefix = "0x";
+ break;
+ default:
+ break;
+ }
+
+ if (strlen(val_prefix) > 0) {
+ val_prefixed_len = strlen(val) + strlen(val_prefix) + 1;
+ val_prefixed = xmalloc(val_prefixed_len);
+ snprintf(val_prefixed, val_prefixed_len, "%s%s", val_prefix, val);
+ val = val_prefixed;
+ }
+
+ /* All values get escaped: the `--cfg` option only takes strings */
+ escaped = escape_string_value(val);
+ val = escaped;
+
+ fprintf(fp, "--cfg=%s%s=%s\n", CONFIG_, sym->name, val);
+
+ free(escaped);
+ free(val_prefixed);
+}
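On the consumer side these flags behave like the C helpers named in the comment above. A hypothetical kernel-crate snippet (the `CONFIG_X` option is made up) would use them as:

// `--cfg=CONFIG_X` alone is the analogue of `IS_ENABLED(CONFIG_X)`.
#[cfg(CONFIG_X)]
fn when_builtin_or_module() {}

// `--cfg=CONFIG_X="y"` and `--cfg=CONFIG_X="m"` are the analogues of
// `IS_BUILTIN(CONFIG_X)` and `IS_MODULE(CONFIG_X)` respectively.
#[cfg(CONFIG_X = "y")]
fn only_when_builtin() {}

#[cfg(CONFIG_X = "m")]
fn only_when_module() {}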
+
/*
* Write out a minimal config.
* All values that has default values are skipped as this is redundant.
@@ -1132,6 +1201,12 @@ int conf_write_autoconf(int overwrite)
if (ret)
return ret;
+ ret = __conf_write_autoconf(conf_get_rustccfg_name(),
+ print_symbol_for_rustccfg,
+ NULL);
+ if (ret)
+ return ret;
+
/*
* Create include/config/auto.conf. This must be the last step because
* Kbuild has a dependency on auto.conf and this marks the successful
diff --git a/scripts/min-tool-version.sh b/scripts/min-tool-version.sh
index 250925aab101..b6593eac5003 100755
--- a/scripts/min-tool-version.sh
+++ b/scripts/min-tool-version.sh
@@ -30,6 +30,12 @@ llvm)
echo 11.0.0
fi
;;
+rustc)
+ echo 1.62.0
+ ;;
+bindgen)
+ echo 0.56.0
+ ;;
*)
echo "$1: unknown tool" >&2
exit 1
diff --git a/scripts/rust-is-available-bindgen-libclang.h b/scripts/rust-is-available-bindgen-libclang.h
new file mode 100644
index 000000000000..0ef6db10d674
--- /dev/null
+++ b/scripts/rust-is-available-bindgen-libclang.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#pragma message("clang version " __clang_version__)
diff --git a/scripts/rust-is-available.sh b/scripts/rust-is-available.sh
new file mode 100755
index 000000000000..0ddad3fcd32c
--- /dev/null
+++ b/scripts/rust-is-available.sh
@@ -0,0 +1,160 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Tests whether a suitable Rust toolchain is available.
+#
+# Pass `-v` for human output and more checks (as warnings).
+
+set -e
+
+min_tool_version=$(dirname $0)/min-tool-version.sh
+
+# Convert the version string x.y.z to a canonical up-to-7-digits form.
+#
+# Note that this function uses one more digit (compared to other
+# instances in other version scripts) to give a bit more space to
+# `rustc` since it will reach 1.100.0 in late 2026.
+get_canonical_version()
+{
+ IFS=.
+ set -- $1
+ echo $((100000 * $1 + 100 * $2 + $3))
+}
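Worked example of the same arithmetic (illustrative Rust, mirroring the shell function above): `1.62.0` maps to 106200 and `1.100.0` to 110000, so version comparisons keep working once `rustc` passes the 99-minor mark.

fn canonical_version(major: u64, minor: u64, patch: u64) -> u64 {
    // e.g. canonical_version(1, 62, 0) == 106200, canonical_version(1, 100, 0) == 110000.
    100000 * major + 100 * minor + patch
}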
+
+# Check that the Rust compiler exists.
+if ! command -v "$RUSTC" >/dev/null; then
+ if [ "$1" = -v ]; then
+ echo >&2 "***"
+ echo >&2 "*** Rust compiler '$RUSTC' could not be found."
+ echo >&2 "***"
+ fi
+ exit 1
+fi
+
+# Check that the Rust bindings generator exists.
+if ! command -v "$BINDGEN" >/dev/null; then
+ if [ "$1" = -v ]; then
+ echo >&2 "***"
+ echo >&2 "*** Rust bindings generator '$BINDGEN' could not be found."
+ echo >&2 "***"
+ fi
+ exit 1
+fi
+
+# Check that the Rust compiler version is suitable.
+#
+# Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
+rust_compiler_version=$( \
+ LC_ALL=C "$RUSTC" --version 2>/dev/null \
+ | head -n 1 \
+ | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
+)
+rust_compiler_min_version=$($min_tool_version rustc)
+rust_compiler_cversion=$(get_canonical_version $rust_compiler_version)
+rust_compiler_min_cversion=$(get_canonical_version $rust_compiler_min_version)
+if [ "$rust_compiler_cversion" -lt "$rust_compiler_min_cversion" ]; then
+ if [ "$1" = -v ]; then
+ echo >&2 "***"
+ echo >&2 "*** Rust compiler '$RUSTC' is too old."
+ echo >&2 "*** Your version: $rust_compiler_version"
+ echo >&2 "*** Minimum version: $rust_compiler_min_version"
+ echo >&2 "***"
+ fi
+ exit 1
+fi
+if [ "$1" = -v ] && [ "$rust_compiler_cversion" -gt "$rust_compiler_min_cversion" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Rust compiler '$RUSTC' is too new. This may or may not work."
+ echo >&2 "*** Your version: $rust_compiler_version"
+ echo >&2 "*** Expected version: $rust_compiler_min_version"
+ echo >&2 "***"
+fi
+
+# Check that the Rust bindings generator is suitable.
+#
+# Non-stable and distributions' versions may have a version suffix, e.g. `-dev`.
+rust_bindings_generator_version=$( \
+ LC_ALL=C "$BINDGEN" --version 2>/dev/null \
+ | head -n 1 \
+ | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
+)
+rust_bindings_generator_min_version=$($min_tool_version bindgen)
+rust_bindings_generator_cversion=$(get_canonical_version $rust_bindings_generator_version)
+rust_bindings_generator_min_cversion=$(get_canonical_version $rust_bindings_generator_min_version)
+if [ "$rust_bindings_generator_cversion" -lt "$rust_bindings_generator_min_cversion" ]; then
+ if [ "$1" = -v ]; then
+ echo >&2 "***"
+ echo >&2 "*** Rust bindings generator '$BINDGEN' is too old."
+ echo >&2 "*** Your version: $rust_bindings_generator_version"
+ echo >&2 "*** Minimum version: $rust_bindings_generator_min_version"
+ echo >&2 "***"
+ fi
+ exit 1
+fi
+if [ "$1" = -v ] && [ "$rust_bindings_generator_cversion" -gt "$rust_bindings_generator_min_cversion" ]; then
+ echo >&2 "***"
+ echo >&2 "*** Rust bindings generator '$BINDGEN' is too new. This may or may not work."
+ echo >&2 "*** Your version: $rust_bindings_generator_version"
+ echo >&2 "*** Expected version: $rust_bindings_generator_min_version"
+ echo >&2 "***"
+fi
+
+# Check that the `libclang` used by the Rust bindings generator is suitable.
+bindgen_libclang_version=$( \
+ LC_ALL=C "$BINDGEN" $(dirname $0)/rust-is-available-bindgen-libclang.h 2>&1 >/dev/null \
+ | grep -F 'clang version ' \
+ | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' \
+ | head -n 1 \
+)
+bindgen_libclang_min_version=$($min_tool_version llvm)
+bindgen_libclang_cversion=$(get_canonical_version $bindgen_libclang_version)
+bindgen_libclang_min_cversion=$(get_canonical_version $bindgen_libclang_min_version)
+if [ "$bindgen_libclang_cversion" -lt "$bindgen_libclang_min_cversion" ]; then
+ if [ "$1" = -v ]; then
+ echo >&2 "***"
+ echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN') is too old."
+ echo >&2 "*** Your version: $bindgen_libclang_version"
+ echo >&2 "*** Minimum version: $bindgen_libclang_min_version"
+ echo >&2 "***"
+ fi
+ exit 1
+fi
+
+# If the C compiler is Clang, then we can also check whether its version
+# matches the `libclang` version used by the Rust bindings generator.
+#
+# In the future, we might be able to perform a full version check, see
+# https://github.com/rust-lang/rust-bindgen/issues/2138.
+if [ "$1" = -v ]; then
+ cc_name=$($(dirname $0)/cc-version.sh "$CC" | cut -f1 -d' ')
+ if [ "$cc_name" = Clang ]; then
+ clang_version=$( \
+ LC_ALL=C "$CC" --version 2>/dev/null \
+ | sed -nE '1s:.*version ([0-9]+\.[0-9]+\.[0-9]+).*:\1:p'
+ )
+ if [ "$clang_version" != "$bindgen_libclang_version" ]; then
+ echo >&2 "***"
+ echo >&2 "*** libclang (used by the Rust bindings generator '$BINDGEN')"
+ echo >&2 "*** version does not match Clang's. This may be a problem."
+ echo >&2 "*** libclang version: $bindgen_libclang_version"
+ echo >&2 "*** Clang version: $clang_version"
+ echo >&2 "***"
+ fi
+ fi
+fi
+
+# Check that the source code for the `core` standard library exists.
+#
+# `$KRUSTFLAGS` is passed in case the user added `--sysroot`.
+rustc_sysroot=$("$RUSTC" $KRUSTFLAGS --print sysroot)
+rustc_src=${RUST_LIB_SRC:-"$rustc_sysroot/lib/rustlib/src/rust/library"}
+rustc_src_core="$rustc_src/core/src/lib.rs"
+if [ ! -e "$rustc_src_core" ]; then
+ if [ "$1" = -v ]; then
+ echo >&2 "***"
+ echo >&2 "*** Source code for the 'core' standard library could not be found"
+ echo >&2 "*** at '$rustc_src_core'."
+ echo >&2 "***"
+ fi
+ exit 1
+fi
diff --git a/scripts/rustdoc_test_builder.py b/scripts/rustdoc_test_builder.py
new file mode 100755
index 000000000000..d9b47a5c54fc
--- /dev/null
+++ b/scripts/rustdoc_test_builder.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+"""rustdoc_test_builder - Test builder for `rustdoc`-generated tests.
+"""
+
+import json
+import pathlib
+import re
+import sys
+
+RUST_DIR = pathlib.Path("rust")
+TESTS_DIR = RUST_DIR / "test" / "doctests" / "kernel"
+
+# `[^\s]*` removes the prefix (e.g. `_doctest_main_`) plus any
+# leading path (for `O=` builds).
+MAIN_RE = re.compile(
+ r"^"
+ r"fn main\(\) { "
+ r"#\[allow\(non_snake_case\)\] "
+ r"fn ([^\s]*rust_kernel_([a-zA-Z0-9_]+))\(\) {"
+ r"$"
+)
+
+def main():
+ found_main = False
+ test_header = ""
+ test_body = ""
+ for line in sys.stdin.readlines():
+ main_match = MAIN_RE.match(line)
+ if main_match:
+ if found_main:
+ raise Exception("More than one `main` line found.")
+ found_main = True
+ function_name = main_match.group(1)
+ test_name = f"rust_kernel_doctest_{main_match.group(2)}"
+ continue
+
+ if found_main:
+ test_body += line
+ else:
+ test_header += line
+
+ if not found_main:
+ raise Exception("No `main` line found.")
+
+ call_line = f"}} {function_name}() }}"
+ if not test_body.endswith(call_line):
+ raise Exception("Unexpected end of test body.")
+ test_body = test_body[:-len(call_line)]
+
+ with open(TESTS_DIR / f"{test_name}.json", "w") as fd:
+ json.dump({
+ "name": test_name,
+ "header": test_header,
+ "body": test_body,
+ }, fd, sort_keys=True, indent=4)
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/rustdoc_test_gen.py b/scripts/rustdoc_test_gen.py
new file mode 100755
index 000000000000..ad9a94293ab5
--- /dev/null
+++ b/scripts/rustdoc_test_gen.py
@@ -0,0 +1,164 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+"""rustdoc_test_gen - Generates KUnit tests from saved `rustdoc`-generated tests.
+"""
+
+import json
+import os
+import pathlib
+
+RUST_DIR = pathlib.Path("rust")
+TESTS_DIR = RUST_DIR / "test" / "doctests" / "kernel"
+
+RUST_FILE = RUST_DIR / "doctests_kernel_generated.rs"
+C_FILE = RUST_DIR / "doctests_kernel_generated_kunit.c"
+
+RUST_TEMPLATE_TEST = """
+/// Generated `{test_name}` KUnit test case from a Rust documentation test.
+#[no_mangle]
+pub fn {test_name}(__kunit_test: *mut kernel::bindings::kunit) {{
+ /// Provides mutual exclusion (see `# Implementation` notes).
+ static __KUNIT_TEST_MUTEX: kernel::sync::smutex::Mutex<()> =
+ kernel::sync::smutex::Mutex::new(());
+
+ /// Saved argument (see `# Implementation` notes).
+ static __KUNIT_TEST: core::sync::atomic::AtomicPtr<kernel::bindings::kunit> =
+ core::sync::atomic::AtomicPtr::new(core::ptr::null_mut());
+
+ let __kunit_test_mutex_guard = __KUNIT_TEST_MUTEX.lock();
+ __KUNIT_TEST.store(__kunit_test, core::sync::atomic::Ordering::SeqCst);
+
+ /// Overrides the usual [`assert!`] macro with one that calls KUnit instead.
+ macro_rules! assert {{
+ ($cond:expr $(,)?) => {{{{
+ kernel::kunit_assert!(
+ __KUNIT_TEST.load(core::sync::atomic::Ordering::SeqCst),
+ $cond
+ );
+ }}}}
+ }}
+
+ /// Overrides the usual [`assert_eq!`] macro with one that calls KUnit instead.
+ macro_rules! assert_eq {{
+ ($left:expr, $right:expr $(,)?) => {{{{
+ kernel::kunit_assert_eq!(
+ __KUNIT_TEST.load(core::sync::atomic::Ordering::SeqCst),
+ $left,
+ $right
+ );
+ }}}}
+ }}
+
+ // Many tests need the prelude, so provide it by default.
+ use kernel::prelude::*;
+
+ {test_body}
+}}
+"""
+RUST_TEMPLATE = """// SPDX-License-Identifier: GPL-2.0
+
+//! `kernel` crate documentation tests.
+
+// # Implementation
+//
+// KUnit gives us a context in the form of the `kunit_test` parameter that one
+// needs to pass back to other KUnit functions and macros.
+//
+// However, we want to keep this as an implementation detail because:
+//
+// - Test code should not care about the implementation.
+//
+// - Documentation looks worse if it needs to carry extra details unrelated
+// to the piece being described.
+//
+// - Test code should be able to define functions and call them, without
+// having to carry the context (since functions cannot capture dynamic
+// environment).
+//
+// - Later on, we may want to be able to test non-kernel code (e.g. `core`,
+// `alloc` or external crates) which likely use the standard library
+// `assert*!` macros.
+//
+// For this reason, `static`s are used in the generated code to save the
+// argument which then gets read by the asserting macros. These macros then
+// call back into KUnit, instead of panicking.
+//
+// To avoid depending on whether KUnit allows tests to run concurrently and/or
+// reentrantly, we ensure mutual exclusion on our end. To ensure a single test
+// being killed does not trigger failure of every other test (timing out),
+// we provide different `static`s per test (which also allow for concurrent
+// execution, though KUnit runs them sequentially).
+//
+// Furthermore, since test code may create threads and assert from them, we use
+// an `AtomicPtr` to hold the context (though each test only writes once before
+// threads may be created).
+
+{rust_header}
+
+const __LOG_PREFIX: &[u8] = b"rust_kernel_doctests\\0";
+
+{rust_tests}
+"""
+
+C_TEMPLATE_TEST_DECLARATION = "void {test_name}(struct kunit *);\n"
+C_TEMPLATE_TEST_CASE = " KUNIT_CASE({test_name}),\n"
+C_TEMPLATE = """// SPDX-License-Identifier: GPL-2.0
+/*
+ * `kernel` crate documentation tests.
+ */
+
+#include <kunit/test.h>
+
+{c_test_declarations}
+
+static struct kunit_case test_cases[] = {{
+ {c_test_cases}
+ {{ }}
+}};
+
+static struct kunit_suite test_suite = {{
+ .name = "rust_kernel_doctests",
+ .test_cases = test_cases,
+}};
+
+kunit_test_suite(test_suite);
+
+MODULE_LICENSE("GPL");
+"""
+
+def main():
+ rust_header = set()
+ rust_tests = ""
+ c_test_declarations = ""
+ c_test_cases = ""
+ for filename in sorted(os.listdir(TESTS_DIR)):
+ with open(TESTS_DIR / filename, "r") as fd:
+ test = json.load(fd)
+ for line in test["header"].strip().split("\n"):
+ rust_header.add(line)
+ rust_tests += RUST_TEMPLATE_TEST.format(
+ test_name = test["name"],
+ test_body = test["body"]
+ )
+ c_test_declarations += C_TEMPLATE_TEST_DECLARATION.format(
+ test_name = test["name"]
+ )
+ c_test_cases += C_TEMPLATE_TEST_CASE.format(
+ test_name = test["name"]
+ )
+ rust_header = sorted(rust_header)
+
+ with open(RUST_FILE, "w") as fd:
+ fd.write(RUST_TEMPLATE.format(
+ rust_header = "\n".join(rust_header).strip(),
+ rust_tests = rust_tests.strip(),
+ ))
+
+ with open(C_FILE, "w") as fd:
+ fd.write(C_TEMPLATE.format(
+ c_test_declarations=c_test_declarations.strip(),
+ c_test_cases=c_test_cases.strip(),
+ ))
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/include/linux/kallsyms.h b/tools/include/linux/kallsyms.h
index efb6c3f5f2a9..5a37ccbec54f 100644
--- a/tools/include/linux/kallsyms.h
+++ b/tools/include/linux/kallsyms.h
@@ -6,7 +6,7 @@
#include <stdio.h>
#include <unistd.h>
-#define KSYM_NAME_LEN 128
+#define KSYM_NAME_LEN 512
struct module;
diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
index 556bb06798f2..3ac4874082f8 100644
--- a/tools/lib/perf/include/perf/event.h
+++ b/tools/lib/perf/include/perf/event.h
@@ -95,7 +95,7 @@ struct perf_record_throttle {
};
#ifndef KSYM_NAME_LEN
-#define KSYM_NAME_LEN 256
+#define KSYM_NAME_LEN 512
#endif
struct perf_record_ksymbol {
diff --git a/tools/lib/symbol/kallsyms.h b/tools/lib/symbol/kallsyms.h
index 72ab9870454b..542f9b059c3b 100644
--- a/tools/lib/symbol/kallsyms.h
+++ b/tools/lib/symbol/kallsyms.h
@@ -7,7 +7,7 @@
#include <linux/types.h>
#ifndef KSYM_NAME_LEN
-#define KSYM_NAME_LEN 256
+#define KSYM_NAME_LEN 512
#endif
static inline u8 kallsyms2elf_binding(char type)