data_type large_stringclasses 3 values | source large_stringclasses 29 values | code large_stringlengths 98 49.4M | filepath large_stringlengths 5 161 ⌀ | message large_stringclasses 234 values | commit large_stringclasses 234 values | subject large_stringclasses 418 values | critique large_stringlengths 101 1.26M ⌀ | metadata dict |
|---|---|---|---|---|---|---|---|---|
lkml_critique | lkml | The latest MC firmware version added a new command to retrieve all DPMAC
counters in a single firmware call. Use this new command, when possible,
in dpaa2-mac as well.
In order to use the dpmac_get_statistics() API, two DMA memory areas are
used: one to transmit what counters the driver is requesting and one to
receive the values of those counters. These memory areas are allocated
and DMA mapped at probe time so that we don't waste time at runtime.
And since we are planning to add rmon, eth-ctrl and other standard
statistics using the same infrastructure, make the setup and cleanup
processes as generic as possibile through the dpaa2_mac_setup_stats()
and dpaa2_mac_clear_stats() functions.
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
---
.../net/ethernet/freescale/dpaa2/dpaa2-mac.c | 195 ++++++++++++++----
.../net/ethernet/freescale/dpaa2/dpaa2-mac.h | 10 +-
2 files changed, 166 insertions(+), 39 deletions(-)
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 422ce13a7c94..63dc597dbd7c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2024-2026 NXP */
#include <linux/acpi.h>
#include <linux/pcs-lynx.h>
@@ -15,7 +15,121 @@
#define DPMAC_PROTOCOL_CHANGE_VER_MAJOR 4
#define DPMAC_PROTOCOL_CHANGE_VER_MINOR 8
+#define DPMAC_STATS_BUNDLE_VER_MAJOR 4
+#define DPMAC_STATS_BUNDLE_VER_MINOR 10
+
#define DPAA2_MAC_FEATURE_PROTOCOL_CHANGE BIT(0)
+#define DPAA2_MAC_FEATURE_STATS_BUNDLE BIT(1)
+
+struct dpmac_counter {
+ enum dpmac_counter_id id;
+ const char *name;
+};
+
+#define DPMAC_UNSTRUCTURED_COUNTER(counter_id, counter_name) \
+ { \
+ .id = counter_id, \
+ .name = counter_name, \
+ }
+
+static const struct dpmac_counter dpaa2_mac_ethtool_stats[] = {
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_ALL_FRAME, "[mac] rx all frames"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_GOOD_FRAME, "[mac] rx frames ok"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_ERR_FRAME, "[mac] rx frame errors"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAME_DISCARD, "[mac] rx frame discards"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_UCAST_FRAME, "[mac] rx u-cast"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_BCAST_FRAME, "[mac] rx b-cast"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_MCAST_FRAME, "[mac] rx m-cast"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAME_64, "[mac] rx 64 bytes"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAME_127, "[mac] rx 65-127 bytes"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAME_255, "[mac] rx 128-255 bytes"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAME_511, "[mac] rx 256-511 bytes"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAME_1023, "[mac] rx 512-1023 bytes"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAME_1518, "[mac] rx 1024-1518 bytes"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAME_1519_MAX, "[mac] rx 1519-max bytes"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_FRAG, "[mac] rx frags"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_JABBER, "[mac] rx jabber"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_ALIGN_ERR, "[mac] rx align errors"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_OVERSIZED, "[mac] rx oversized"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_VALID_PAUSE_FRAME, "[mac] rx pause"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_ING_BYTE, "[mac] rx bytes"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_EGR_GOOD_FRAME, "[mac] tx frames ok"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_EGR_UCAST_FRAME, "[mac] tx u-cast"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_EGR_MCAST_FRAME, "[mac] tx m-cast"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_EGR_BCAST_FRAME, "[mac] tx b-cast"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_EGR_ERR_FRAME, "[mac] tx frame errors"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_EGR_UNDERSIZED, "[mac] tx undersized"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "[mac] tx b-pause"),
+ DPMAC_UNSTRUCTURED_COUNTER(DPMAC_CNT_EGR_BYTE, "[mac] tx bytes"),
+};
+
+#define DPAA2_MAC_NUM_ETHTOOL_STATS ARRAY_SIZE(dpaa2_mac_ethtool_stats)
+
+static void dpaa2_mac_setup_stats(struct dpaa2_mac *mac, struct dpaa2_mac_stats *stats,
+ size_t num_stats, const struct dpmac_counter *counters)
+{
+ struct device *dev = mac->net_dev->dev.parent;
+ u32 *cnt_idx;
+
+ stats->idx_dma_mem = kcalloc(num_stats, sizeof(u32), GFP_KERNEL);
+ if (!stats->idx_dma_mem)
+ goto out;
+
+ stats->values_dma_mem = kcalloc(num_stats, sizeof(u64), GFP_KERNEL);
+ if (!stats->values_dma_mem)
+ goto err_alloc_values;
+
+ cnt_idx = stats->idx_dma_mem;
+ for (size_t i = 0; i < num_stats; i++)
+ *cnt_idx++ = cpu_to_le32((u32)(counters[i].id));
+
+ stats->idx_iova = dma_map_single(dev, stats->idx_dma_mem,
+ num_stats * sizeof(u32),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, stats->idx_iova))
+ goto err_dma_map_idx;
+
+ stats->values_iova = dma_map_single(dev, stats->values_dma_mem,
+ num_stats * sizeof(u64),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, stats->values_iova))
+ goto err_dma_map_values;
+
+ return;
+
+err_dma_map_values:
+ dma_unmap_single(dev, stats->idx_iova, num_stats * sizeof(u32),
+ DMA_TO_DEVICE);
+err_dma_map_idx:
+ kfree(stats->values_dma_mem);
+err_alloc_values:
+ kfree(stats->idx_dma_mem);
+out:
+ stats->idx_dma_mem = NULL;
+ stats->values_dma_mem = NULL;
+}
+
+static void dpaa2_mac_clear_stats(struct dpaa2_mac *mac, struct dpaa2_mac_stats *stats,
+ size_t num_stats)
+{
+ struct device *dev = mac->net_dev->dev.parent;
+
+ if (stats->idx_dma_mem) {
+ dma_unmap_single(dev, stats->idx_iova,
+ num_stats * sizeof(u32),
+ DMA_TO_DEVICE);
+ kfree(stats->idx_dma_mem);
+ stats->idx_dma_mem = NULL;
+ }
+
+ if (stats->values_dma_mem) {
+ dma_unmap_single(dev, stats->values_iova,
+ num_stats * sizeof(u64),
+ DMA_FROM_DEVICE);
+ kfree(stats->values_dma_mem);
+ stats->values_dma_mem = NULL;
+ }
+}
static int dpaa2_mac_cmp_ver(struct dpaa2_mac *mac,
u16 ver_major, u16 ver_minor)
@@ -32,6 +146,10 @@ static void dpaa2_mac_detect_features(struct dpaa2_mac *mac)
if (dpaa2_mac_cmp_ver(mac, DPMAC_PROTOCOL_CHANGE_VER_MAJOR,
DPMAC_PROTOCOL_CHANGE_VER_MINOR) >= 0)
mac->features |= DPAA2_MAC_FEATURE_PROTOCOL_CHANGE;
+
+ if (dpaa2_mac_cmp_ver(mac, DPMAC_STATS_BUNDLE_VER_MAJOR,
+ DPMAC_STATS_BUNDLE_VER_MINOR) >= 0)
+ mac->features |= DPAA2_MAC_FEATURE_STATS_BUNDLE;
}
static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
@@ -504,6 +622,10 @@ int dpaa2_mac_open(struct dpaa2_mac *mac)
mac->fw_node = fw_node;
net_dev->dev.of_node = to_of_node(mac->fw_node);
+ if (mac->features & DPAA2_MAC_FEATURE_STATS_BUNDLE)
+ dpaa2_mac_setup_stats(mac, &mac->ethtool_stats,
+ DPAA2_MAC_NUM_ETHTOOL_STATS, dpaa2_mac_ethtool_stats);
+
return 0;
err_close_dpmac:
@@ -515,64 +637,61 @@ void dpaa2_mac_close(struct dpaa2_mac *mac)
{
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ if (mac->features & DPAA2_MAC_FEATURE_STATS_BUNDLE)
+ dpaa2_mac_clear_stats(mac, &mac->ethtool_stats, DPAA2_MAC_NUM_ETHTOOL_STATS);
+
dpmac_close(mac->mc_io, 0, dpmac_dev->mc_handle);
if (mac->fw_node)
fwnode_handle_put(mac->fw_node);
}
-static char dpaa2_mac_ethtool_stats[][ETH_GSTRING_LEN] = {
- [DPMAC_CNT_ING_ALL_FRAME] = "[mac] rx all frames",
- [DPMAC_CNT_ING_GOOD_FRAME] = "[mac] rx frames ok",
- [DPMAC_CNT_ING_ERR_FRAME] = "[mac] rx frame errors",
- [DPMAC_CNT_ING_FRAME_DISCARD] = "[mac] rx frame discards",
- [DPMAC_CNT_ING_UCAST_FRAME] = "[mac] rx u-cast",
- [DPMAC_CNT_ING_BCAST_FRAME] = "[mac] rx b-cast",
- [DPMAC_CNT_ING_MCAST_FRAME] = "[mac] rx m-cast",
- [DPMAC_CNT_ING_FRAME_64] = "[mac] rx 64 bytes",
- [DPMAC_CNT_ING_FRAME_127] = "[mac] rx 65-127 bytes",
- [DPMAC_CNT_ING_FRAME_255] = "[mac] rx 128-255 bytes",
- [DPMAC_CNT_ING_FRAME_511] = "[mac] rx 256-511 bytes",
- [DPMAC_CNT_ING_FRAME_1023] = "[mac] rx 512-1023 bytes",
- [DPMAC_CNT_ING_FRAME_1518] = "[mac] rx 1024-1518 bytes",
- [DPMAC_CNT_ING_FRAME_1519_MAX] = "[mac] rx 1519-max bytes",
- [DPMAC_CNT_ING_FRAG] = "[mac] rx frags",
- [DPMAC_CNT_ING_JABBER] = "[mac] rx jabber",
- [DPMAC_CNT_ING_ALIGN_ERR] = "[mac] rx align errors",
- [DPMAC_CNT_ING_OVERSIZED] = "[mac] rx oversized",
- [DPMAC_CNT_ING_VALID_PAUSE_FRAME] = "[mac] rx pause",
- [DPMAC_CNT_ING_BYTE] = "[mac] rx bytes",
- [DPMAC_CNT_EGR_GOOD_FRAME] = "[mac] tx frames ok",
- [DPMAC_CNT_EGR_UCAST_FRAME] = "[mac] tx u-cast",
- [DPMAC_CNT_EGR_MCAST_FRAME] = "[mac] tx m-cast",
- [DPMAC_CNT_EGR_BCAST_FRAME] = "[mac] tx b-cast",
- [DPMAC_CNT_EGR_ERR_FRAME] = "[mac] tx frame errors",
- [DPMAC_CNT_EGR_UNDERSIZED] = "[mac] tx undersized",
- [DPMAC_CNT_EGR_VALID_PAUSE_FRAME] = "[mac] tx b-pause",
- [DPMAC_CNT_EGR_BYTE] = "[mac] tx bytes",
-};
-
-#define DPAA2_MAC_NUM_STATS ARRAY_SIZE(dpaa2_mac_ethtool_stats)
-
int dpaa2_mac_get_sset_count(void)
{
- return DPAA2_MAC_NUM_STATS;
+ return DPAA2_MAC_NUM_ETHTOOL_STATS;
}
void dpaa2_mac_get_strings(u8 **data)
{
int i;
- for (i = 0; i < DPAA2_MAC_NUM_STATS; i++)
- ethtool_puts(data, dpaa2_mac_ethtool_stats[i]);
+ for (i = 0; i < DPAA2_MAC_NUM_ETHTOOL_STATS; i++)
+ ethtool_puts(data, dpaa2_mac_ethtool_stats[i].name);
}
void dpaa2_mac_get_ethtool_stats(struct dpaa2_mac *mac, u64 *data)
{
+ struct device *dev = mac->net_dev->dev.parent;
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
+ u64 *cnt_values;
int i, err;
u64 value;
- for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
+ if (!(mac->features & DPAA2_MAC_FEATURE_STATS_BUNDLE))
+ goto fallback;
+
+ if (!mac->ethtool_stats.idx_dma_mem || !mac->ethtool_stats.values_dma_mem)
+ goto fallback;
+
+ err = dpmac_get_statistics(mac->mc_io, 0, dpmac_dev->mc_handle,
+ mac->ethtool_stats.idx_iova, mac->ethtool_stats.values_iova,
+ DPAA2_MAC_NUM_ETHTOOL_STATS);
+ if (err)
+ goto fallback;
+
+ dma_sync_single_for_cpu(dev, mac->ethtool_stats.values_iova,
+ DPAA2_MAC_NUM_ETHTOOL_STATS * sizeof(u64),
+ DMA_FROM_DEVICE);
+
+ cnt_values = mac->ethtool_stats.values_dma_mem;
+ for (i = 0; i < DPAA2_MAC_NUM_ETHTOOL_STATS; i++)
+ *(data + i) = le64_to_cpu(*cnt_values++);
+
+ return;
+
+fallback:
+
+ /* Fallback and retrieve each counter one by one */
+ for (i = 0; i < DPAA2_MAC_NUM_ETHTOOL_STATS; i++) {
err = dpmac_get_counter(mac->mc_io, 0, dpmac_dev->mc_handle,
i, &value);
if (err) {
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 53f8d106d11e..386286209606 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2019 NXP */
+/* Copyright 2019, 2024-2026 NXP */
#ifndef DPAA2_MAC_H
#define DPAA2_MAC_H
@@ -11,6 +11,12 @@
#include "dpmac.h"
#include "dpmac-cmd.h"
+struct dpaa2_mac_stats {
+ u32 *idx_dma_mem;
+ u64 *values_dma_mem;
+ dma_addr_t idx_iova, values_iova;
+};
+
struct dpaa2_mac {
struct fsl_mc_device *mc_dev;
struct dpmac_link_state state;
@@ -28,6 +34,8 @@ struct dpaa2_mac {
struct fwnode_handle *fw_node;
struct phy *serdes_phy;
+
+ struct dpaa2_mac_stats ethtool_stats;
};
static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac)
--
2.25.1
| null | null | null | [PATCH net-next 2/5] net: dpaa2-mac: retrieve MAC statistics in one firmware command | Jakub Kicinski <kuba@kernel.org> writes:
I think the expectation is that by default, tests written in Bash are
run on one machine without remotes.
I think this fundamentally stems from the fact that running processes in
Python is a bit unwieldy, so it makes sense to have helpers, so
everybody uses them, so you can have helpers grow brains to do things
like over-the-ssh configuration. In Bash, running a traffic generator is
easier than working with arrays. So the helpers tend not to be as useful
and we don't generally have them. At least not in any consistent way.
Eyeing the file, the requirement for the remote interface to be up and
configured with an IP address is a bit surprising to me. I would think a
down state is the most natural, and have the test bring it up and
configure it in a way that it needs. I'm thinking maybe this is to allow
testing a sole interface on like an embedded device?
Anyway, that's a fairly strong differency between how Bash tests are
typically written and the NIC setup. I think basically all existing
tests assume the devices are theirs to tamper with.
In principle nothing prevents lib.sh from growing brains to support
these remote shenanigans. I think it's just that so far nobody cared
enough to actually do it.
I think that a helper that in effect does "run this on a machine where
$swp1 is" is mostly what is needed. That and "make sure $swp1 and $swp2
are on the same machine". It's going to be annoying to work with though,
because you need to annotate every single command. I bet there's a nice
syntax to make it not activelly annoying.
If we have this, it might make sense to require tests to make use of it.
(With an explicit opt-out for special cases.) But I do not want every
test to have to reinvent this wheel and cargo-cult snippets from other
tests.
BTW, my guess is that even many multi-port tests that I wrote boil down
to just a bunch of fairly independent loopbacks whose far ends could be
on remote machines. It's not a priori nonsense to me that one would run
a test like this, or whatever magic we'd use:
./test.sh ssh://petr@10.1.2.3:eth1 swp1 veth1 ns://foo:veth2
And it just works, because only swp1 and swp2 need to be bridged, the
rest can be remote, and the traffic generation helper knows that to pump
traffic to ssh://10.1.2.3:eth1, obviously you need to ssh there first.
But the library would need to have helpers for this, and the tests would
need to use them.
At least ethtool counters would cause problems obviously. | {
"author": "Petr Machata <petrm@nvidia.com>",
"date": "Fri, 27 Feb 2026 14:53:06 +0100",
"is_openbsd": false,
"thread_id": "87ms0ufc0p.fsf@nvidia.com.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | In preparation for converting simple_xattrs from rbtree to rhashtable,
add rhash_head and rcu_head members to struct simple_xattr. The
rhashtable implementation will use rhash_head for hash table linkage
and RCU-based lockless reads, requiring that replaced or removed xattr
entries be freed via call_rcu() rather than immediately.
Add simple_xattr_free_rcu() which schedules RCU-deferred freeing of an
xattr entry. This will be used by callers of simple_xattr_set() once
they switch to the rhashtable-based xattr store.
No functional changes.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/xattr.c | 23 +++++++++++++++++++++++
include/linux/xattr.h | 4 ++++
2 files changed, 27 insertions(+)
diff --git a/fs/xattr.c b/fs/xattr.c
index 3e49e612e1ba..9cbb1917bcb2 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -1197,6 +1197,29 @@ void simple_xattr_free(struct simple_xattr *xattr)
kvfree(xattr);
}
+static void simple_xattr_rcu_free(struct rcu_head *head)
+{
+ struct simple_xattr *xattr;
+
+ xattr = container_of(head, struct simple_xattr, rcu);
+ simple_xattr_free(xattr);
+}
+
+/**
+ * simple_xattr_free_rcu - free an xattr object after an RCU grace period
+ * @xattr: the xattr object
+ *
+ * Schedule RCU-deferred freeing of an xattr entry. This is used by
+ * rhashtable-based callers of simple_xattr_set() that replace or remove
+ * an existing entry while concurrent RCU readers may still be accessing
+ * it.
+ */
+void simple_xattr_free_rcu(struct simple_xattr *xattr)
+{
+ if (xattr)
+ call_rcu(&xattr->rcu, simple_xattr_rcu_free);
+}
+
/**
* simple_xattr_alloc - allocate new xattr object
* @value: value of the xattr object
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 64e9afe7d647..1328f2bfd2ce 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -16,6 +16,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/rhashtable-types.h>
#include <linux/user_namespace.h>
#include <uapi/linux/xattr.h>
@@ -112,6 +113,8 @@ struct simple_xattrs {
struct simple_xattr {
struct rb_node rb_node;
+ struct rhash_head hash_node;
+ struct rcu_head rcu;
char *name;
size_t size;
char value[];
@@ -122,6 +125,7 @@ void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space);
size_t simple_xattr_space(const char *name, size_t size);
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
void simple_xattr_free(struct simple_xattr *xattr);
+void simple_xattr_free_rcu(struct simple_xattr *xattr);
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size);
struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:31:57 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Add rhashtable support to the simple_xattr subsystem while keeping the
existing rbtree code fully functional. This allows consumers to be
migrated one at a time without breaking any intermediate build.
struct simple_xattrs gains a dispatch flag and a union holding either
the rbtree (rb_root + rwlock) or rhashtable state:
struct simple_xattrs {
bool use_rhashtable;
union {
struct { struct rb_root rb_root; rwlock_t lock; };
struct rhashtable ht;
};
};
simple_xattrs_init() continues to set up the rbtree path for existing
embedded-struct callers.
Add simple_xattrs_alloc() which dynamically allocates a simple_xattrs
and initializes the rhashtable path. This is the entry point for
consumers switching to pointer-based lazy allocation.
The five core functions (get, set, list, add, free) dispatch based on
the use_rhashtable flag.
Existing callers continue to use the rbtree path unchanged. As each
consumer is converted it will switch to simple_xattrs_alloc() and the
rhashtable path. Once all consumers are converted a follow-up patch
will remove the rbtree code.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/xattr.c | 439 ++++++++++++++++++++++++++++++++++++++------------
include/linux/xattr.h | 25 ++-
mm/shmem.c | 2 +-
3 files changed, 357 insertions(+), 109 deletions(-)
diff --git a/fs/xattr.c b/fs/xattr.c
index 9cbb1917bcb2..1d98ea459b7b 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -22,6 +22,7 @@
#include <linux/audit.h>
#include <linux/vmalloc.h>
#include <linux/posix_acl_xattr.h>
+#include <linux/rhashtable.h>
#include <linux/uaccess.h>
@@ -1228,22 +1229,25 @@ void simple_xattr_free_rcu(struct simple_xattr *xattr)
* Allocate a new xattr object and initialize respective members. The caller is
* responsible for handling the name of the xattr.
*
- * Return: On success a new xattr object is returned. On failure NULL is
- * returned.
+ * Return: New xattr object on success, NULL if @value is NULL, ERR_PTR on
+ * failure.
*/
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
{
struct simple_xattr *new_xattr;
size_t len;
+ if (!value)
+ return NULL;
+
/* wrap around? */
len = sizeof(*new_xattr) + size;
if (len < sizeof(*new_xattr))
- return NULL;
+ return ERR_PTR(-ENOMEM);
new_xattr = kvmalloc(len, GFP_KERNEL_ACCOUNT);
if (!new_xattr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
new_xattr->size = size;
memcpy(new_xattr->value, value, size);
@@ -1287,6 +1291,33 @@ static int rbtree_simple_xattr_node_cmp(struct rb_node *new_node,
return rbtree_simple_xattr_cmp(xattr->name, node);
}
+static u32 simple_xattr_hashfn(const void *data, u32 len, u32 seed)
+{
+ const char *name = data;
+ return jhash(name, strlen(name), seed);
+}
+
+static u32 simple_xattr_obj_hashfn(const void *obj, u32 len, u32 seed)
+{
+ const struct simple_xattr *xattr = obj;
+ return jhash(xattr->name, strlen(xattr->name), seed);
+}
+
+static int simple_xattr_obj_cmpfn(struct rhashtable_compare_arg *arg,
+ const void *obj)
+{
+ const struct simple_xattr *xattr = obj;
+ return strcmp(xattr->name, arg->key);
+}
+
+static const struct rhashtable_params simple_xattr_params = {
+ .head_offset = offsetof(struct simple_xattr, hash_node),
+ .hashfn = simple_xattr_hashfn,
+ .obj_hashfn = simple_xattr_obj_hashfn,
+ .obj_cmpfn = simple_xattr_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+
/**
* simple_xattr_get - get an xattr object
* @xattrs: the header of the xattr object
@@ -1306,22 +1337,41 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size)
{
struct simple_xattr *xattr = NULL;
- struct rb_node *rbp;
int ret = -ENODATA;
- read_lock(&xattrs->lock);
- rbp = rb_find(name, &xattrs->rb_root, rbtree_simple_xattr_cmp);
- if (rbp) {
- xattr = rb_entry(rbp, struct simple_xattr, rb_node);
- ret = xattr->size;
- if (buffer) {
- if (size < xattr->size)
- ret = -ERANGE;
- else
- memcpy(buffer, xattr->value, xattr->size);
+ if (xattrs->use_rhashtable) {
+ guard(rcu)();
+ xattr = rhashtable_lookup(&xattrs->ht, name,
+ simple_xattr_params);
+ if (xattr) {
+ ret = xattr->size;
+ if (buffer) {
+ if (size < xattr->size)
+ ret = -ERANGE;
+ else
+ memcpy(buffer, xattr->value,
+ xattr->size);
+ }
+ }
+ } else {
+ struct rb_node *rbp;
+
+ read_lock(&xattrs->lock);
+ rbp = rb_find(name, &xattrs->rb_root,
+ rbtree_simple_xattr_cmp);
+ if (rbp) {
+ xattr = rb_entry(rbp, struct simple_xattr, rb_node);
+ ret = xattr->size;
+ if (buffer) {
+ if (size < xattr->size)
+ ret = -ERANGE;
+ else
+ memcpy(buffer, xattr->value,
+ xattr->size);
+ }
}
+ read_unlock(&xattrs->lock);
}
- read_unlock(&xattrs->lock);
return ret;
}
@@ -1355,78 +1405,134 @@ struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
const char *name, const void *value,
size_t size, int flags)
{
- struct simple_xattr *old_xattr = NULL, *new_xattr = NULL;
- struct rb_node *parent = NULL, **rbp;
- int err = 0, ret;
+ struct simple_xattr *old_xattr = NULL;
+ int err = 0;
- /* value == NULL means remove */
- if (value) {
- new_xattr = simple_xattr_alloc(value, size);
- if (!new_xattr)
- return ERR_PTR(-ENOMEM);
+ CLASS(simple_xattr, new_xattr)(value, size);
+ if (IS_ERR(new_xattr))
+ return new_xattr;
+ if (new_xattr) {
new_xattr->name = kstrdup(name, GFP_KERNEL_ACCOUNT);
- if (!new_xattr->name) {
- simple_xattr_free(new_xattr);
+ if (!new_xattr->name)
return ERR_PTR(-ENOMEM);
- }
}
- write_lock(&xattrs->lock);
- rbp = &xattrs->rb_root.rb_node;
- while (*rbp) {
- parent = *rbp;
- ret = rbtree_simple_xattr_cmp(name, *rbp);
- if (ret < 0)
- rbp = &(*rbp)->rb_left;
- else if (ret > 0)
- rbp = &(*rbp)->rb_right;
- else
- old_xattr = rb_entry(*rbp, struct simple_xattr, rb_node);
- if (old_xattr)
- break;
- }
+ if (xattrs->use_rhashtable) {
+ /*
+ * Lookup is safe without RCU here since writes are
+ * serialized by the caller.
+ */
+ old_xattr = rhashtable_lookup_fast(&xattrs->ht, name,
+ simple_xattr_params);
+
+ if (old_xattr) {
+ /* Fail if XATTR_CREATE is requested and the xattr exists. */
+ if (flags & XATTR_CREATE)
+ return ERR_PTR(-EEXIST);
+
+ if (new_xattr) {
+ err = rhashtable_replace_fast(&xattrs->ht,
+ &old_xattr->hash_node,
+ &new_xattr->hash_node,
+ simple_xattr_params);
+ if (err)
+ return ERR_PTR(err);
+ } else {
+ err = rhashtable_remove_fast(&xattrs->ht,
+ &old_xattr->hash_node,
+ simple_xattr_params);
+ if (err)
+ return ERR_PTR(err);
+ }
+ } else {
+ /* Fail if XATTR_REPLACE is requested but no xattr is found. */
+ if (flags & XATTR_REPLACE)
+ return ERR_PTR(-ENODATA);
+
+ /*
+ * If XATTR_CREATE or no flags are specified together
+ * with a new value simply insert it.
+ */
+ if (new_xattr) {
+ err = rhashtable_insert_fast(&xattrs->ht,
+ &new_xattr->hash_node,
+ simple_xattr_params);
+ if (err)
+ return ERR_PTR(err);
+ }
- if (old_xattr) {
- /* Fail if XATTR_CREATE is requested and the xattr exists. */
- if (flags & XATTR_CREATE) {
- err = -EEXIST;
- goto out_unlock;
+ /*
+ * If XATTR_CREATE or no flags are specified and
+ * neither an old or new xattr exist then we don't
+ * need to do anything.
+ */
}
-
- if (new_xattr)
- rb_replace_node(&old_xattr->rb_node,
- &new_xattr->rb_node, &xattrs->rb_root);
- else
- rb_erase(&old_xattr->rb_node, &xattrs->rb_root);
} else {
- /* Fail if XATTR_REPLACE is requested but no xattr is found. */
- if (flags & XATTR_REPLACE) {
- err = -ENODATA;
- goto out_unlock;
- }
+ struct rb_node *parent = NULL, **rbp;
+ int ret;
- /*
- * If XATTR_CREATE or no flags are specified together with a
- * new value simply insert it.
- */
- if (new_xattr) {
- rb_link_node(&new_xattr->rb_node, parent, rbp);
- rb_insert_color(&new_xattr->rb_node, &xattrs->rb_root);
+ write_lock(&xattrs->lock);
+ rbp = &xattrs->rb_root.rb_node;
+ while (*rbp) {
+ parent = *rbp;
+ ret = rbtree_simple_xattr_cmp(name, *rbp);
+ if (ret < 0)
+ rbp = &(*rbp)->rb_left;
+ else if (ret > 0)
+ rbp = &(*rbp)->rb_right;
+ else
+ old_xattr = rb_entry(*rbp, struct simple_xattr,
+ rb_node);
+ if (old_xattr)
+ break;
}
- /*
- * If XATTR_CREATE or no flags are specified and neither an
- * old or new xattr exist then we don't need to do anything.
- */
- }
+ if (old_xattr) {
+ /* Fail if XATTR_CREATE is requested and the xattr exists. */
+ if (flags & XATTR_CREATE) {
+ err = -EEXIST;
+ goto out_unlock;
+ }
+
+ if (new_xattr)
+ rb_replace_node(&old_xattr->rb_node,
+ &new_xattr->rb_node,
+ &xattrs->rb_root);
+ else
+ rb_erase(&old_xattr->rb_node,
+ &xattrs->rb_root);
+ } else {
+ /* Fail if XATTR_REPLACE is requested but no xattr is found. */
+ if (flags & XATTR_REPLACE) {
+ err = -ENODATA;
+ goto out_unlock;
+ }
+
+ /*
+ * If XATTR_CREATE or no flags are specified together
+ * with a new value simply insert it.
+ */
+ if (new_xattr) {
+ rb_link_node(&new_xattr->rb_node, parent, rbp);
+ rb_insert_color(&new_xattr->rb_node,
+ &xattrs->rb_root);
+ }
+
+ /*
+ * If XATTR_CREATE or no flags are specified and
+ * neither an old or new xattr exist then we don't
+ * need to do anything.
+ */
+ }
out_unlock:
- write_unlock(&xattrs->lock);
- if (!err)
- return old_xattr;
- simple_xattr_free(new_xattr);
- return ERR_PTR(err);
+ write_unlock(&xattrs->lock);
+ if (err)
+ return ERR_PTR(err);
+ }
+ retain_and_null_ptr(new_xattr);
+ return old_xattr;
}
static bool xattr_is_trusted(const char *name)
@@ -1467,7 +1573,6 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
{
bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
struct simple_xattr *xattr;
- struct rb_node *rbp;
ssize_t remaining_size = size;
int err = 0;
@@ -1487,23 +1592,62 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
remaining_size -= err;
err = 0;
- read_lock(&xattrs->lock);
- for (rbp = rb_first(&xattrs->rb_root); rbp; rbp = rb_next(rbp)) {
- xattr = rb_entry(rbp, struct simple_xattr, rb_node);
+ if (!xattrs)
+ return size - remaining_size;
- /* skip "trusted." attributes for unprivileged callers */
- if (!trusted && xattr_is_trusted(xattr->name))
- continue;
+ if (xattrs->use_rhashtable) {
+ struct rhashtable_iter iter;
- /* skip MAC labels; these are provided by LSM above */
- if (xattr_is_maclabel(xattr->name))
- continue;
+ rhashtable_walk_enter(&xattrs->ht, &iter);
+ rhashtable_walk_start(&iter);
- err = xattr_list_one(&buffer, &remaining_size, xattr->name);
- if (err)
- break;
+ while ((xattr = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(xattr)) {
+ if (PTR_ERR(xattr) == -EAGAIN)
+ continue;
+ err = PTR_ERR(xattr);
+ break;
+ }
+
+ /* skip "trusted." attributes for unprivileged callers */
+ if (!trusted && xattr_is_trusted(xattr->name))
+ continue;
+
+ /* skip MAC labels; these are provided by LSM above */
+ if (xattr_is_maclabel(xattr->name))
+ continue;
+
+ err = xattr_list_one(&buffer, &remaining_size,
+ xattr->name);
+ if (err)
+ break;
+ }
+
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ } else {
+ struct rb_node *rbp;
+
+ read_lock(&xattrs->lock);
+ for (rbp = rb_first(&xattrs->rb_root); rbp;
+ rbp = rb_next(rbp)) {
+ xattr = rb_entry(rbp, struct simple_xattr, rb_node);
+
+ /* skip "trusted." attributes for unprivileged callers */
+ if (!trusted && xattr_is_trusted(xattr->name))
+ continue;
+
+ /* skip MAC labels; these are provided by LSM above */
+ if (xattr_is_maclabel(xattr->name))
+ continue;
+
+ err = xattr_list_one(&buffer, &remaining_size,
+ xattr->name);
+ if (err)
+ break;
+ }
+ read_unlock(&xattrs->lock);
}
- read_unlock(&xattrs->lock);
return err ? err : size - remaining_size;
}
@@ -1536,9 +1680,16 @@ static bool rbtree_simple_xattr_less(struct rb_node *new_node,
void simple_xattr_add(struct simple_xattrs *xattrs,
struct simple_xattr *new_xattr)
{
- write_lock(&xattrs->lock);
- rb_add(&new_xattr->rb_node, &xattrs->rb_root, rbtree_simple_xattr_less);
- write_unlock(&xattrs->lock);
+ if (xattrs->use_rhashtable) {
+ WARN_ON(rhashtable_insert_fast(&xattrs->ht,
+ &new_xattr->hash_node,
+ simple_xattr_params));
+ } else {
+ write_lock(&xattrs->lock);
+ rb_add(&new_xattr->rb_node, &xattrs->rb_root,
+ rbtree_simple_xattr_less);
+ write_unlock(&xattrs->lock);
+ }
}
/**
@@ -1549,10 +1700,80 @@ void simple_xattr_add(struct simple_xattrs *xattrs,
*/
void simple_xattrs_init(struct simple_xattrs *xattrs)
{
+ xattrs->use_rhashtable = false;
xattrs->rb_root = RB_ROOT;
rwlock_init(&xattrs->lock);
}
+/**
+ * simple_xattrs_alloc - allocate and initialize a new xattr header
+ *
+ * Dynamically allocate a simple_xattrs header and initialize the
+ * underlying rhashtable. This is intended for consumers that want
+ * rhashtable-based xattr storage.
+ *
+ * Return: On success a new simple_xattrs is returned. On failure an
+ * ERR_PTR is returned.
+ */
+struct simple_xattrs *simple_xattrs_alloc(void)
+{
+ struct simple_xattrs *xattrs __free(kfree) = NULL;
+
+ xattrs = kzalloc(sizeof(*xattrs), GFP_KERNEL);
+ if (!xattrs)
+ return ERR_PTR(-ENOMEM);
+
+ xattrs->use_rhashtable = true;
+ if (rhashtable_init(&xattrs->ht, &simple_xattr_params))
+ return ERR_PTR(-ENOMEM);
+
+ return no_free_ptr(xattrs);
+}
+
+/**
+ * simple_xattrs_lazy_alloc - get or allocate xattrs for a set operation
+ * @xattrsp: pointer to the xattrs pointer (may point to NULL)
+ * @value: value being set (NULL means remove)
+ * @flags: xattr set flags
+ *
+ * For lazily-allocated xattrs on the write path. If no xattrs exist yet
+ * and this is a remove operation, returns the appropriate result without
+ * allocating. Otherwise ensures xattrs is allocated and published with
+ * store-release semantics.
+ *
+ * Return: On success a valid pointer to the xattrs is returned. On
+ * failure or early-exit an ERR_PTR or NULL is returned. Callers should
+ * check with IS_ERR_OR_NULL() and propagate with PTR_ERR() which
+ * correctly returns 0 for the NULL no-op case.
+ */
+struct simple_xattrs *simple_xattrs_lazy_alloc(struct simple_xattrs **xattrsp,
+ const void *value, int flags)
+{
+ struct simple_xattrs *xattrs;
+
+ xattrs = READ_ONCE(*xattrsp);
+ if (xattrs)
+ return xattrs;
+
+ if (!value)
+ return (flags & XATTR_REPLACE) ? ERR_PTR(-ENODATA) : NULL;
+
+ xattrs = simple_xattrs_alloc();
+ if (!IS_ERR(xattrs))
+ smp_store_release(xattrsp, xattrs);
+ return xattrs;
+}
+
+static void simple_xattr_ht_free(void *ptr, void *arg)
+{
+ struct simple_xattr *xattr = ptr;
+ size_t *freed_space = arg;
+
+ if (freed_space)
+ *freed_space += simple_xattr_space(xattr->name, xattr->size);
+ simple_xattr_free(xattr);
+}
+
/**
* simple_xattrs_free - free xattrs
* @xattrs: xattr header whose xattrs to destroy
@@ -1563,22 +1784,28 @@ void simple_xattrs_init(struct simple_xattrs *xattrs)
*/
void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space)
{
- struct rb_node *rbp;
-
if (freed_space)
*freed_space = 0;
- rbp = rb_first(&xattrs->rb_root);
- while (rbp) {
- struct simple_xattr *xattr;
- struct rb_node *rbp_next;
-
- rbp_next = rb_next(rbp);
- xattr = rb_entry(rbp, struct simple_xattr, rb_node);
- rb_erase(&xattr->rb_node, &xattrs->rb_root);
- if (freed_space)
- *freed_space += simple_xattr_space(xattr->name,
- xattr->size);
- simple_xattr_free(xattr);
- rbp = rbp_next;
+
+ if (xattrs->use_rhashtable) {
+ rhashtable_free_and_destroy(&xattrs->ht,
+ simple_xattr_ht_free, freed_space);
+ } else {
+ struct rb_node *rbp;
+
+ rbp = rb_first(&xattrs->rb_root);
+ while (rbp) {
+ struct simple_xattr *xattr;
+ struct rb_node *rbp_next;
+
+ rbp_next = rb_next(rbp);
+ xattr = rb_entry(rbp, struct simple_xattr, rb_node);
+ rb_erase(&xattr->rb_node, &xattrs->rb_root);
+ if (freed_space)
+ *freed_space += simple_xattr_space(xattr->name,
+ xattr->size);
+ simple_xattr_free(xattr);
+ rbp = rbp_next;
+ }
}
}
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 1328f2bfd2ce..ee4fd40717a0 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -107,8 +107,14 @@ static inline const char *xattr_prefix(const struct xattr_handler *handler)
}
struct simple_xattrs {
- struct rb_root rb_root;
- rwlock_t lock;
+ bool use_rhashtable;
+ union {
+ struct {
+ struct rb_root rb_root;
+ rwlock_t lock;
+ };
+ struct rhashtable ht;
+ };
};
struct simple_xattr {
@@ -121,6 +127,9 @@ struct simple_xattr {
};
void simple_xattrs_init(struct simple_xattrs *xattrs);
+struct simple_xattrs *simple_xattrs_alloc(void);
+struct simple_xattrs *simple_xattrs_lazy_alloc(struct simple_xattrs **xattrsp,
+ const void *value, int flags);
void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space);
size_t simple_xattr_space(const char *name, size_t size);
struct simple_xattr *simple_xattr_alloc(const void *value, size_t size);
@@ -137,4 +146,16 @@ void simple_xattr_add(struct simple_xattrs *xattrs,
struct simple_xattr *new_xattr);
int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name);
+DEFINE_CLASS(simple_xattr,
+ struct simple_xattr *,
+ if (!IS_ERR_OR_NULL(_T)) simple_xattr_free(_T),
+ simple_xattr_alloc(value, size),
+ const void *value, size_t size)
+
+DEFINE_CLASS(simple_xattrs,
+ struct simple_xattrs *,
+ if (!IS_ERR_OR_NULL(_T)) { simple_xattrs_free(_T, NULL); kfree(_T); },
+ simple_xattrs_alloc(),
+ void)
+
#endif /* _LINUX_XATTR_H */
diff --git a/mm/shmem.c b/mm/shmem.c
index 063b4c3e4ccb..fc8020ce2e9f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4293,7 +4293,7 @@ static int shmem_initxattrs(struct inode *inode,
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
- if (!new_xattr)
+ if (IS_ERR(new_xattr))
break;
len = strlen(xattr->name) + 1;
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:31:58 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Adapt tmpfs/shmem to use the rhashtable-based xattr path and switch
from an embedded struct to pointer-based lazy allocation.
Change shmem_inode_info.xattrs from embedded 'struct simple_xattrs' to
a pointer 'struct simple_xattrs *', initialized to NULL. This avoids
the rhashtable overhead for every tmpfs inode, which helps when a lot of
inodes exist.
The xattr store is allocated on first use:
- shmem_initxattrs(): Allocates via simple_xattrs_alloc() when
security modules set initial xattrs during inode creation.
- shmem_xattr_handler_set(): Allocates on first setxattr, with a
short-circuit for removal when no xattrs are stored yet.
All read paths (shmem_xattr_handler_get, shmem_listxattr) check for
NULL xattrs pointer and return -ENODATA or 0 respectively.
Replaced xattr entries are freed via simple_xattr_free_rcu() to allow
concurrent RCU readers to finish.
shmem_evict_inode() conditionally frees the xattr store only when
allocated.
Also change simple_xattr_add() from void to int to propagate
rhashtable insertion failures. shmem_initxattrs() is the only caller.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/xattr.c | 26 +++++++++++++-------------
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 4 ++--
mm/shmem.c | 44 +++++++++++++++++++++++++++++++-------------
4 files changed, 47 insertions(+), 29 deletions(-)
diff --git a/fs/xattr.c b/fs/xattr.c
index 1d98ea459b7b..eb45ae0fd17f 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -1677,19 +1677,19 @@ static bool rbtree_simple_xattr_less(struct rb_node *new_node,
* of matching xattrs is wanted. Should only be called during inode
* initialization when a few distinct initial xattrs are supposed to be set.
*/
-void simple_xattr_add(struct simple_xattrs *xattrs,
- struct simple_xattr *new_xattr)
-{
- if (xattrs->use_rhashtable) {
- WARN_ON(rhashtable_insert_fast(&xattrs->ht,
- &new_xattr->hash_node,
- simple_xattr_params));
- } else {
- write_lock(&xattrs->lock);
- rb_add(&new_xattr->rb_node, &xattrs->rb_root,
- rbtree_simple_xattr_less);
- write_unlock(&xattrs->lock);
- }
+int simple_xattr_add(struct simple_xattrs *xattrs,
+ struct simple_xattr *new_xattr)
+{
+ if (xattrs->use_rhashtable)
+ return rhashtable_insert_fast(&xattrs->ht,
+ &new_xattr->hash_node,
+ simple_xattr_params);
+
+ write_lock(&xattrs->lock);
+ rb_add(&new_xattr->rb_node, &xattrs->rb_root,
+ rbtree_simple_xattr_less);
+ write_unlock(&xattrs->lock);
+ return 0;
}
/**
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index e2069b3179c4..53d325409a8b 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -48,7 +48,7 @@ struct shmem_inode_info {
};
struct timespec64 i_crtime; /* file creation time */
struct shared_policy policy; /* NUMA memory alloc policy */
- struct simple_xattrs xattrs; /* list of xattrs */
+ struct simple_xattrs *xattrs; /* list of xattrs */
pgoff_t fallocend; /* highest fallocate endindex */
unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index ee4fd40717a0..3063ecf0004d 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -142,8 +142,8 @@ struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
size_t size, int flags);
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
char *buffer, size_t size);
-void simple_xattr_add(struct simple_xattrs *xattrs,
- struct simple_xattr *new_xattr);
+int simple_xattr_add(struct simple_xattrs *xattrs,
+ struct simple_xattr *new_xattr);
int xattr_list_one(char **buffer, ssize_t *remaining_size, const char *name);
DEFINE_CLASS(simple_xattr,
diff --git a/mm/shmem.c b/mm/shmem.c
index fc8020ce2e9f..8761c9b4f1c5 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1426,7 +1426,10 @@ static void shmem_evict_inode(struct inode *inode)
}
}
- simple_xattrs_free(&info->xattrs, sbinfo->max_inodes ? &freed : NULL);
+ if (info->xattrs) {
+ simple_xattrs_free(info->xattrs, sbinfo->max_inodes ? &freed : NULL);
+ kfree(info->xattrs);
+ }
shmem_free_inode(inode->i_sb, freed);
WARN_ON(inode->i_blocks);
clear_inode(inode);
@@ -3118,7 +3121,6 @@ static struct inode *__shmem_get_inode(struct mnt_idmap *idmap,
shmem_set_inode_flags(inode, info->fsflags, NULL);
INIT_LIST_HEAD(&info->shrinklist);
INIT_LIST_HEAD(&info->swaplist);
- simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
if (sbinfo->noswap)
mapping_set_unevictable(inode->i_mapping);
@@ -4270,10 +4272,13 @@ static int shmem_initxattrs(struct inode *inode,
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
const struct xattr *xattr;
- struct simple_xattr *new_xattr;
size_t ispace = 0;
size_t len;
+ CLASS(simple_xattrs, xattrs)();
+ if (IS_ERR(xattrs))
+ return PTR_ERR(xattrs);
+
if (sbinfo->max_inodes) {
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
ispace += simple_xattr_space(xattr->name,
@@ -4292,24 +4297,24 @@ static int shmem_initxattrs(struct inode *inode,
}
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
- new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
+ CLASS(simple_xattr, new_xattr)(xattr->value, xattr->value_len);
if (IS_ERR(new_xattr))
break;
len = strlen(xattr->name) + 1;
new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
GFP_KERNEL_ACCOUNT);
- if (!new_xattr->name) {
- kvfree(new_xattr);
+ if (!new_xattr->name)
break;
- }
memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN);
memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
xattr->name, len);
- simple_xattr_add(&info->xattrs, new_xattr);
+ if (simple_xattr_add(xattrs, new_xattr))
+ break;
+ retain_and_null_ptr(new_xattr);
}
if (xattr->name != NULL) {
@@ -4318,10 +4323,10 @@ static int shmem_initxattrs(struct inode *inode,
sbinfo->free_ispace += ispace;
raw_spin_unlock(&sbinfo->stat_lock);
}
- simple_xattrs_free(&info->xattrs, NULL);
return -ENOMEM;
}
+ smp_store_release(&info->xattrs, no_free_ptr(xattrs));
return 0;
}
@@ -4330,9 +4335,14 @@ static int shmem_xattr_handler_get(const struct xattr_handler *handler,
const char *name, void *buffer, size_t size)
{
struct shmem_inode_info *info = SHMEM_I(inode);
+ struct simple_xattrs *xattrs;
+
+ xattrs = READ_ONCE(info->xattrs);
+ if (!xattrs)
+ return -ENODATA;
name = xattr_full_name(handler, name);
- return simple_xattr_get(&info->xattrs, name, buffer, size);
+ return simple_xattr_get(xattrs, name, buffer, size);
}
static int shmem_xattr_handler_set(const struct xattr_handler *handler,
@@ -4343,10 +4353,16 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
{
struct shmem_inode_info *info = SHMEM_I(inode);
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
+ struct simple_xattrs *xattrs;
struct simple_xattr *old_xattr;
size_t ispace = 0;
name = xattr_full_name(handler, name);
+
+ xattrs = simple_xattrs_lazy_alloc(&info->xattrs, value, flags);
+ if (IS_ERR_OR_NULL(xattrs))
+ return PTR_ERR(xattrs);
+
if (value && sbinfo->max_inodes) {
ispace = simple_xattr_space(name, size);
raw_spin_lock(&sbinfo->stat_lock);
@@ -4359,13 +4375,13 @@ static int shmem_xattr_handler_set(const struct xattr_handler *handler,
return -ENOSPC;
}
- old_xattr = simple_xattr_set(&info->xattrs, name, value, size, flags);
+ old_xattr = simple_xattr_set(xattrs, name, value, size, flags);
if (!IS_ERR(old_xattr)) {
ispace = 0;
if (old_xattr && sbinfo->max_inodes)
ispace = simple_xattr_space(old_xattr->name,
old_xattr->size);
- simple_xattr_free(old_xattr);
+ simple_xattr_free_rcu(old_xattr);
old_xattr = NULL;
inode_set_ctime_current(inode);
inode_inc_iversion(inode);
@@ -4406,7 +4422,9 @@ static const struct xattr_handler * const shmem_xattr_handlers[] = {
static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
- return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
+
+ return simple_xattr_list(d_inode(dentry), READ_ONCE(info->xattrs),
+ buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:31:59 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Adapt kernfs to use the rhashtable-based xattr path and switch from an
embedded struct to pointer-based lazy allocation.
Change kernfs_iattrs.xattrs from embedded 'struct simple_xattrs' to a
pointer 'struct simple_xattrs *', initialized to NULL (zeroed by
kmem_cache_zalloc). Since kernfs_iattrs is already lazily allocated
itself, this adds a second level of lazy allocation specifically for
the xattr store.
The xattr store is allocated on first setxattr. Read paths
check for NULL and return -ENODATA or empty list.
Replaced xattr entries are freed via simple_xattr_free_rcu() to allow
concurrent RCU readers to finish.
The cleanup paths in kernfs_free_rcu() and __kernfs_new_node() error
handling conditionally free the xattr store only when allocated.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/kernfs/dir.c | 15 +++++++++++----
fs/kernfs/inode.c | 34 +++++++++++++++++++++++++---------
fs/kernfs/kernfs-internal.h | 2 +-
3 files changed, 37 insertions(+), 14 deletions(-)
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 29baeeb97871..e5735c45fb99 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -547,10 +547,8 @@ static void kernfs_free_rcu(struct rcu_head *rcu)
/* If the whole node goes away, then name can't be used outside */
kfree_const(rcu_access_pointer(kn->name));
- if (kn->iattr) {
- simple_xattrs_free(&kn->iattr->xattrs, NULL);
+ if (kn->iattr)
kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
- }
kmem_cache_free(kernfs_node_cache, kn);
}
@@ -584,6 +582,12 @@ void kernfs_put(struct kernfs_node *kn)
if (kernfs_type(kn) == KERNFS_LINK)
kernfs_put(kn->symlink.target_kn);
+ if (kn->iattr && kn->iattr->xattrs) {
+ simple_xattrs_free(kn->iattr->xattrs, NULL);
+ kfree(kn->iattr->xattrs);
+ kn->iattr->xattrs = NULL;
+ }
+
spin_lock(&root->kernfs_idr_lock);
idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
spin_unlock(&root->kernfs_idr_lock);
@@ -682,7 +686,10 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
err_out4:
if (kn->iattr) {
- simple_xattrs_free(&kn->iattr->xattrs, NULL);
+ if (kn->iattr->xattrs) {
+ simple_xattrs_free(kn->iattr->xattrs, NULL);
+ kfree(kn->iattr->xattrs);
+ }
kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
}
err_out3:
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index a36aaee98dce..dfc3315b5afc 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -45,7 +45,6 @@ static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, bool alloc)
ret->ia_mtime = ret->ia_atime;
ret->ia_ctime = ret->ia_atime;
- simple_xattrs_init(&ret->xattrs);
atomic_set(&ret->nr_user_xattrs, 0);
atomic_set(&ret->user_xattr_size, 0);
@@ -146,7 +145,8 @@ ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
if (!attrs)
return -ENOMEM;
- return simple_xattr_list(d_inode(dentry), &attrs->xattrs, buf, size);
+ return simple_xattr_list(d_inode(dentry), READ_ONCE(attrs->xattrs),
+ buf, size);
}
static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
@@ -298,27 +298,38 @@ int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
void *value, size_t size)
{
struct kernfs_iattrs *attrs = kernfs_iattrs_noalloc(kn);
+ struct simple_xattrs *xattrs;
+
if (!attrs)
return -ENODATA;
- return simple_xattr_get(&attrs->xattrs, name, value, size);
+ xattrs = READ_ONCE(attrs->xattrs);
+ if (!xattrs)
+ return -ENODATA;
+
+ return simple_xattr_get(xattrs, name, value, size);
}
int kernfs_xattr_set(struct kernfs_node *kn, const char *name,
const void *value, size_t size, int flags)
{
struct simple_xattr *old_xattr;
+ struct simple_xattrs *xattrs;
struct kernfs_iattrs *attrs;
attrs = kernfs_iattrs(kn);
if (!attrs)
return -ENOMEM;
- old_xattr = simple_xattr_set(&attrs->xattrs, name, value, size, flags);
+ xattrs = simple_xattrs_lazy_alloc(&attrs->xattrs, value, flags);
+ if (IS_ERR_OR_NULL(xattrs))
+ return PTR_ERR(xattrs);
+
+ old_xattr = simple_xattr_set(xattrs, name, value, size, flags);
if (IS_ERR(old_xattr))
return PTR_ERR(old_xattr);
- simple_xattr_free(old_xattr);
+ simple_xattr_free_rcu(old_xattr);
return 0;
}
@@ -376,7 +387,7 @@ static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
ret = 0;
size = old_xattr->size;
- simple_xattr_free(old_xattr);
+ simple_xattr_free_rcu(old_xattr);
dec_size_out:
atomic_sub(size, sz);
dec_count_out:
@@ -403,7 +414,7 @@ static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
atomic_sub(old_xattr->size, sz);
atomic_dec(nr);
- simple_xattr_free(old_xattr);
+ simple_xattr_free_rcu(old_xattr);
return 0;
}
@@ -415,6 +426,7 @@ static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
{
const char *full_name = xattr_full_name(handler, suffix);
struct kernfs_node *kn = inode->i_private;
+ struct simple_xattrs *xattrs;
struct kernfs_iattrs *attrs;
if (!(kernfs_root(kn)->flags & KERNFS_ROOT_SUPPORT_USER_XATTR))
@@ -424,11 +436,15 @@ static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
if (!attrs)
return -ENOMEM;
+ xattrs = simple_xattrs_lazy_alloc(&attrs->xattrs, value, flags);
+ if (IS_ERR_OR_NULL(xattrs))
+ return PTR_ERR(xattrs);
+
if (value)
- return kernfs_vfs_user_xattr_add(kn, full_name, &attrs->xattrs,
+ return kernfs_vfs_user_xattr_add(kn, full_name, xattrs,
value, size, flags);
else
- return kernfs_vfs_user_xattr_rm(kn, full_name, &attrs->xattrs,
+ return kernfs_vfs_user_xattr_rm(kn, full_name, xattrs,
value, size, flags);
}
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 6061b6f70d2a..1324ed8c0661 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -26,7 +26,7 @@ struct kernfs_iattrs {
struct timespec64 ia_mtime;
struct timespec64 ia_ctime;
- struct simple_xattrs xattrs;
+ struct simple_xattrs *xattrs;
atomic_t nr_user_xattrs;
atomic_t user_xattr_size;
};
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:00 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Adapt pidfs to use the rhashtable-based xattr path by switching from a
dedicated slab cache to simple_xattrs_alloc().
Previously pidfs used a custom kmem_cache (pidfs_xattr_cachep) that
allocated a struct containing an embedded simple_xattrs plus
simple_xattrs_init(). Replace this with simple_xattrs_alloc() which
combines kzalloc + rhashtable_init, and drop the dedicated slab cache
entirely.
Use simple_xattr_free_rcu() for replaced xattr entries to allow
concurrent RCU readers to finish.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/pidfs.c | 65 +++++++++++++++++++++++++++++++++++++++-----------------------
1 file changed, 41 insertions(+), 24 deletions(-)
diff --git a/fs/pidfs.c b/fs/pidfs.c
index 1e20e36e0ed5..cb62000681df 100644
--- a/fs/pidfs.c
+++ b/fs/pidfs.c
@@ -21,6 +21,7 @@
#include <linux/utsname.h>
#include <net/net_namespace.h>
#include <linux/coredump.h>
+#include <linux/llist.h>
#include <linux/xattr.h>
#include "internal.h"
@@ -29,7 +30,6 @@
#define PIDFS_PID_DEAD ERR_PTR(-ESRCH)
static struct kmem_cache *pidfs_attr_cachep __ro_after_init;
-static struct kmem_cache *pidfs_xattr_cachep __ro_after_init;
static struct path pidfs_root_path = {};
@@ -44,9 +44,8 @@ enum pidfs_attr_mask_bits {
PIDFS_ATTR_BIT_COREDUMP = 1,
};
-struct pidfs_attr {
+struct pidfs_anon_attr {
unsigned long attr_mask;
- struct simple_xattrs *xattrs;
struct /* exit info */ {
__u64 cgroupid;
__s32 exit_code;
@@ -55,6 +54,14 @@ struct pidfs_attr {
__u32 coredump_signal;
};
+struct pidfs_attr {
+ struct simple_xattrs *xattrs;
+ union {
+ struct pidfs_anon_attr;
+ struct llist_node pidfs_llist;
+ };
+};
+
static struct rb_root pidfs_ino_tree = RB_ROOT;
#if BITS_PER_LONG == 32
@@ -147,10 +154,30 @@ void pidfs_remove_pid(struct pid *pid)
write_seqcount_end(&pidmap_lock_seq);
}
+static LLIST_HEAD(pidfs_free_list);
+
+static void pidfs_free_attr_work(struct work_struct *work)
+{
+ struct pidfs_attr *attr, *next;
+ struct llist_node *head;
+
+ head = llist_del_all(&pidfs_free_list);
+ llist_for_each_entry_safe(attr, next, head, pidfs_llist) {
+ struct simple_xattrs *xattrs = attr->xattrs;
+
+ if (xattrs) {
+ simple_xattrs_free(xattrs, NULL);
+ kfree(xattrs);
+ }
+ kfree(attr);
+ }
+}
+
+static DECLARE_WORK(pidfs_free_work, pidfs_free_attr_work);
+
void pidfs_free_pid(struct pid *pid)
{
- struct pidfs_attr *attr __free(kfree) = no_free_ptr(pid->attr);
- struct simple_xattrs *xattrs __free(kfree) = NULL;
+ struct pidfs_attr *attr = pid->attr;
/*
* Any dentry must've been wiped from the pid by now.
@@ -169,9 +196,10 @@ void pidfs_free_pid(struct pid *pid)
if (IS_ERR(attr))
return;
- xattrs = no_free_ptr(attr->xattrs);
- if (xattrs)
- simple_xattrs_free(xattrs, NULL);
+ if (likely(!attr->xattrs))
+ kfree(attr);
+ else if (llist_add(&attr->pidfs_llist, &pidfs_free_list))
+ schedule_work(&pidfs_free_work);
}
#ifdef CONFIG_PROC_FS
@@ -998,7 +1026,7 @@ static int pidfs_xattr_get(const struct xattr_handler *handler,
xattrs = READ_ONCE(attr->xattrs);
if (!xattrs)
- return 0;
+ return -ENODATA;
name = xattr_full_name(handler, suffix);
return simple_xattr_get(xattrs, name, value, size);
@@ -1018,22 +1046,16 @@ static int pidfs_xattr_set(const struct xattr_handler *handler,
/* Ensure we're the only one to set @attr->xattrs. */
WARN_ON_ONCE(!inode_is_locked(inode));
- xattrs = READ_ONCE(attr->xattrs);
- if (!xattrs) {
- xattrs = kmem_cache_zalloc(pidfs_xattr_cachep, GFP_KERNEL);
- if (!xattrs)
- return -ENOMEM;
-
- simple_xattrs_init(xattrs);
- smp_store_release(&pid->attr->xattrs, xattrs);
- }
+ xattrs = simple_xattrs_lazy_alloc(&attr->xattrs, value, flags);
+ if (IS_ERR_OR_NULL(xattrs))
+ return PTR_ERR(xattrs);
name = xattr_full_name(handler, suffix);
old_xattr = simple_xattr_set(xattrs, name, value, size, flags);
if (IS_ERR(old_xattr))
return PTR_ERR(old_xattr);
- simple_xattr_free(old_xattr);
+ simple_xattr_free_rcu(old_xattr);
return 0;
}
@@ -1108,11 +1130,6 @@ void __init pidfs_init(void)
(SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
SLAB_ACCOUNT | SLAB_PANIC), NULL);
- pidfs_xattr_cachep = kmem_cache_create("pidfs_xattr_cache",
- sizeof(struct simple_xattrs), 0,
- (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
- SLAB_ACCOUNT | SLAB_PANIC), NULL);
-
pidfs_mnt = kern_mount(&pidfs_type);
if (IS_ERR(pidfs_mnt))
panic("Failed to mount pidfs pseudo filesystem");
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:01 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Now that all consumers (shmem, kernfs, pidfs) have been converted to
use the rhashtable-based simple_xattrs with pointer-based lazy
allocation, remove the legacy rbtree code path. The rhashtable
implementation provides O(1) average-case lookup with RCU-based lockless
reads, replacing the O(log n) rbtree with reader-writer spinlock
contention.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/xattr.c | 387 +++++++++++++-------------------------------------
include/linux/xattr.h | 12 +-
2 files changed, 103 insertions(+), 296 deletions(-)
diff --git a/fs/xattr.c b/fs/xattr.c
index eb45ae0fd17f..64803097e1dc 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -1200,20 +1200,18 @@ void simple_xattr_free(struct simple_xattr *xattr)
static void simple_xattr_rcu_free(struct rcu_head *head)
{
- struct simple_xattr *xattr;
+ struct simple_xattr *xattr = container_of(head, struct simple_xattr, rcu);
- xattr = container_of(head, struct simple_xattr, rcu);
simple_xattr_free(xattr);
}
/**
- * simple_xattr_free_rcu - free an xattr object after an RCU grace period
+ * simple_xattr_free_rcu - free an xattr object with RCU delay
* @xattr: the xattr object
*
- * Schedule RCU-deferred freeing of an xattr entry. This is used by
- * rhashtable-based callers of simple_xattr_set() that replace or remove
- * an existing entry while concurrent RCU readers may still be accessing
- * it.
+ * Free the xattr object after an RCU grace period. This must be used when
+ * the xattr was removed from a data structure that concurrent RCU readers
+ * may still be traversing. Can handle @xattr being NULL.
*/
void simple_xattr_free_rcu(struct simple_xattr *xattr)
{
@@ -1254,43 +1252,6 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
return new_xattr;
}
-/**
- * rbtree_simple_xattr_cmp - compare xattr name with current rbtree xattr entry
- * @key: xattr name
- * @node: current node
- *
- * Compare the xattr name with the xattr name attached to @node in the rbtree.
- *
- * Return: Negative value if continuing left, positive if continuing right, 0
- * if the xattr attached to @node matches @key.
- */
-static int rbtree_simple_xattr_cmp(const void *key, const struct rb_node *node)
-{
- const char *xattr_name = key;
- const struct simple_xattr *xattr;
-
- xattr = rb_entry(node, struct simple_xattr, rb_node);
- return strcmp(xattr->name, xattr_name);
-}
-
-/**
- * rbtree_simple_xattr_node_cmp - compare two xattr rbtree nodes
- * @new_node: new node
- * @node: current node
- *
- * Compare the xattr attached to @new_node with the xattr attached to @node.
- *
- * Return: Negative value if continuing left, positive if continuing right, 0
- * if the xattr attached to @new_node matches the xattr attached to @node.
- */
-static int rbtree_simple_xattr_node_cmp(struct rb_node *new_node,
- const struct rb_node *node)
-{
- struct simple_xattr *xattr;
- xattr = rb_entry(new_node, struct simple_xattr, rb_node);
- return rbtree_simple_xattr_cmp(xattr->name, node);
-}
-
static u32 simple_xattr_hashfn(const void *data, u32 len, u32 seed)
{
const char *name = data;
@@ -1336,41 +1297,19 @@ static const struct rhashtable_params simple_xattr_params = {
int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
void *buffer, size_t size)
{
- struct simple_xattr *xattr = NULL;
+ struct simple_xattr *xattr;
int ret = -ENODATA;
- if (xattrs->use_rhashtable) {
- guard(rcu)();
- xattr = rhashtable_lookup(&xattrs->ht, name,
- simple_xattr_params);
- if (xattr) {
- ret = xattr->size;
- if (buffer) {
- if (size < xattr->size)
- ret = -ERANGE;
- else
- memcpy(buffer, xattr->value,
- xattr->size);
- }
- }
- } else {
- struct rb_node *rbp;
-
- read_lock(&xattrs->lock);
- rbp = rb_find(name, &xattrs->rb_root,
- rbtree_simple_xattr_cmp);
- if (rbp) {
- xattr = rb_entry(rbp, struct simple_xattr, rb_node);
- ret = xattr->size;
- if (buffer) {
- if (size < xattr->size)
- ret = -ERANGE;
- else
- memcpy(buffer, xattr->value,
- xattr->size);
- }
+ guard(rcu)();
+ xattr = rhashtable_lookup(&xattrs->ht, name, simple_xattr_params);
+ if (xattr) {
+ ret = xattr->size;
+ if (buffer) {
+ if (size < xattr->size)
+ ret = -ERANGE;
+ else
+ memcpy(buffer, xattr->value, xattr->size);
}
- read_unlock(&xattrs->lock);
}
return ret;
}
@@ -1398,6 +1337,11 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
* nothing if XATTR_CREATE is specified in @flags or @flags is zero. For
* XATTR_REPLACE we fail as mentioned above.
*
+ * Note: Callers must externally serialize writes. All current callers hold
+ * the inode lock for write operations. The lookup->replace/remove sequence
+ * is not atomic with respect to the rhashtable's per-bucket locking, but
+ * is safe because writes are serialized by the caller.
+ *
* Return: On success, the removed or replaced xattr is returned, to be freed
* by the caller; or NULL if none. On failure a negative error code is returned.
*/
@@ -1406,7 +1350,7 @@ struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
size_t size, int flags)
{
struct simple_xattr *old_xattr = NULL;
- int err = 0;
+ int err;
CLASS(simple_xattr, new_xattr)(value, size);
if (IS_ERR(new_xattr))
@@ -1418,119 +1362,52 @@ struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
return ERR_PTR(-ENOMEM);
}
- if (xattrs->use_rhashtable) {
- /*
- * Lookup is safe without RCU here since writes are
- * serialized by the caller.
- */
- old_xattr = rhashtable_lookup_fast(&xattrs->ht, name,
- simple_xattr_params);
-
- if (old_xattr) {
- /* Fail if XATTR_CREATE is requested and the xattr exists. */
- if (flags & XATTR_CREATE)
- return ERR_PTR(-EEXIST);
-
- if (new_xattr) {
- err = rhashtable_replace_fast(&xattrs->ht,
- &old_xattr->hash_node,
- &new_xattr->hash_node,
- simple_xattr_params);
- if (err)
- return ERR_PTR(err);
- } else {
- err = rhashtable_remove_fast(&xattrs->ht,
- &old_xattr->hash_node,
- simple_xattr_params);
- if (err)
- return ERR_PTR(err);
- }
- } else {
- /* Fail if XATTR_REPLACE is requested but no xattr is found. */
- if (flags & XATTR_REPLACE)
- return ERR_PTR(-ENODATA);
-
- /*
- * If XATTR_CREATE or no flags are specified together
- * with a new value simply insert it.
- */
- if (new_xattr) {
- err = rhashtable_insert_fast(&xattrs->ht,
- &new_xattr->hash_node,
- simple_xattr_params);
- if (err)
- return ERR_PTR(err);
- }
-
- /*
- * If XATTR_CREATE or no flags are specified and
- * neither an old or new xattr exist then we don't
- * need to do anything.
- */
- }
- } else {
- struct rb_node *parent = NULL, **rbp;
- int ret;
-
- write_lock(&xattrs->lock);
- rbp = &xattrs->rb_root.rb_node;
- while (*rbp) {
- parent = *rbp;
- ret = rbtree_simple_xattr_cmp(name, *rbp);
- if (ret < 0)
- rbp = &(*rbp)->rb_left;
- else if (ret > 0)
- rbp = &(*rbp)->rb_right;
- else
- old_xattr = rb_entry(*rbp, struct simple_xattr,
- rb_node);
- if (old_xattr)
- break;
- }
+ /* Lookup is safe without RCU here since writes are serialized. */
+ old_xattr = rhashtable_lookup_fast(&xattrs->ht, name,
+ simple_xattr_params);
- if (old_xattr) {
- /* Fail if XATTR_CREATE is requested and the xattr exists. */
- if (flags & XATTR_CREATE) {
- err = -EEXIST;
- goto out_unlock;
- }
+ if (old_xattr) {
+ /* Fail if XATTR_CREATE is requested and the xattr exists. */
+ if (flags & XATTR_CREATE)
+ return ERR_PTR(-EEXIST);
- if (new_xattr)
- rb_replace_node(&old_xattr->rb_node,
- &new_xattr->rb_node,
- &xattrs->rb_root);
- else
- rb_erase(&old_xattr->rb_node,
- &xattrs->rb_root);
+ if (new_xattr) {
+ err = rhashtable_replace_fast(&xattrs->ht,
+ &old_xattr->hash_node,
+ &new_xattr->hash_node,
+ simple_xattr_params);
+ if (err)
+ return ERR_PTR(err);
} else {
- /* Fail if XATTR_REPLACE is requested but no xattr is found. */
- if (flags & XATTR_REPLACE) {
- err = -ENODATA;
- goto out_unlock;
- }
-
- /*
- * If XATTR_CREATE or no flags are specified together
- * with a new value simply insert it.
- */
- if (new_xattr) {
- rb_link_node(&new_xattr->rb_node, parent, rbp);
- rb_insert_color(&new_xattr->rb_node,
- &xattrs->rb_root);
- }
+ err = rhashtable_remove_fast(&xattrs->ht,
+ &old_xattr->hash_node,
+ simple_xattr_params);
+ if (err)
+ return ERR_PTR(err);
+ }
+ } else {
+ /* Fail if XATTR_REPLACE is requested but no xattr is found. */
+ if (flags & XATTR_REPLACE)
+ return ERR_PTR(-ENODATA);
- /*
- * If XATTR_CREATE or no flags are specified and
- * neither an old or new xattr exist then we don't
- * need to do anything.
- */
+ /*
+ * If XATTR_CREATE or no flags are specified together with a
+ * new value simply insert it.
+ */
+ if (new_xattr) {
+ err = rhashtable_insert_fast(&xattrs->ht,
+ &new_xattr->hash_node,
+ simple_xattr_params);
+ if (err)
+ return ERR_PTR(err);
}
-out_unlock:
- write_unlock(&xattrs->lock);
- if (err)
- return ERR_PTR(err);
+ /*
+ * If XATTR_CREATE or no flags are specified and neither an
+ * old or new xattr exist then we don't need to do anything.
+ */
}
+
retain_and_null_ptr(new_xattr);
return old_xattr;
}
@@ -1572,6 +1449,7 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
char *buffer, size_t size)
{
bool trusted = ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
+ struct rhashtable_iter iter;
struct simple_xattr *xattr;
ssize_t remaining_size = size;
int err = 0;
@@ -1595,77 +1473,34 @@ ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
if (!xattrs)
return size - remaining_size;
- if (xattrs->use_rhashtable) {
- struct rhashtable_iter iter;
-
- rhashtable_walk_enter(&xattrs->ht, &iter);
- rhashtable_walk_start(&iter);
-
- while ((xattr = rhashtable_walk_next(&iter)) != NULL) {
- if (IS_ERR(xattr)) {
- if (PTR_ERR(xattr) == -EAGAIN)
- continue;
- err = PTR_ERR(xattr);
- break;
- }
-
- /* skip "trusted." attributes for unprivileged callers */
- if (!trusted && xattr_is_trusted(xattr->name))
- continue;
+ rhashtable_walk_enter(&xattrs->ht, &iter);
+ rhashtable_walk_start(&iter);
- /* skip MAC labels; these are provided by LSM above */
- if (xattr_is_maclabel(xattr->name))
+ while ((xattr = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(xattr)) {
+ if (PTR_ERR(xattr) == -EAGAIN)
continue;
-
- err = xattr_list_one(&buffer, &remaining_size,
- xattr->name);
- if (err)
- break;
+ err = PTR_ERR(xattr);
+ break;
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
- } else {
- struct rb_node *rbp;
-
- read_lock(&xattrs->lock);
- for (rbp = rb_first(&xattrs->rb_root); rbp;
- rbp = rb_next(rbp)) {
- xattr = rb_entry(rbp, struct simple_xattr, rb_node);
-
- /* skip "trusted." attributes for unprivileged callers */
- if (!trusted && xattr_is_trusted(xattr->name))
- continue;
+ /* skip "trusted." attributes for unprivileged callers */
+ if (!trusted && xattr_is_trusted(xattr->name))
+ continue;
- /* skip MAC labels; these are provided by LSM above */
- if (xattr_is_maclabel(xattr->name))
- continue;
+ /* skip MAC labels; these are provided by LSM above */
+ if (xattr_is_maclabel(xattr->name))
+ continue;
- err = xattr_list_one(&buffer, &remaining_size,
- xattr->name);
- if (err)
- break;
- }
- read_unlock(&xattrs->lock);
+ err = xattr_list_one(&buffer, &remaining_size, xattr->name);
+ if (err)
+ break;
}
- return err ? err : size - remaining_size;
-}
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
-/**
- * rbtree_simple_xattr_less - compare two xattr rbtree nodes
- * @new_node: new node
- * @node: current node
- *
- * Compare the xattr attached to @new_node with the xattr attached to @node.
- * Note that this function technically tolerates duplicate entries.
- *
- * Return: True if insertion point in the rbtree is found.
- */
-static bool rbtree_simple_xattr_less(struct rb_node *new_node,
- const struct rb_node *node)
-{
- return rbtree_simple_xattr_node_cmp(new_node, node) < 0;
+ return err ? err : size - remaining_size;
}
/**
@@ -1676,33 +1511,29 @@ static bool rbtree_simple_xattr_less(struct rb_node *new_node,
* Add an xattr object to @xattrs. This assumes no replacement or removal
* of matching xattrs is wanted. Should only be called during inode
* initialization when a few distinct initial xattrs are supposed to be set.
+ *
+ * Return: On success zero is returned. On failure a negative error code is
+ * returned.
*/
int simple_xattr_add(struct simple_xattrs *xattrs,
struct simple_xattr *new_xattr)
{
- if (xattrs->use_rhashtable)
- return rhashtable_insert_fast(&xattrs->ht,
- &new_xattr->hash_node,
- simple_xattr_params);
-
- write_lock(&xattrs->lock);
- rb_add(&new_xattr->rb_node, &xattrs->rb_root,
- rbtree_simple_xattr_less);
- write_unlock(&xattrs->lock);
- return 0;
+ return rhashtable_insert_fast(&xattrs->ht, &new_xattr->hash_node,
+ simple_xattr_params);
}
/**
* simple_xattrs_init - initialize new xattr header
* @xattrs: header to initialize
*
- * Initialize relevant fields of a an xattr header.
+ * Initialize the rhashtable used to store xattr objects.
+ *
+ * Return: On success zero is returned. On failure a negative error code is
+ * returned.
*/
-void simple_xattrs_init(struct simple_xattrs *xattrs)
+int simple_xattrs_init(struct simple_xattrs *xattrs)
{
- xattrs->use_rhashtable = false;
- xattrs->rb_root = RB_ROOT;
- rwlock_init(&xattrs->lock);
+ return rhashtable_init(&xattrs->ht, &simple_xattr_params);
}
/**
@@ -1710,7 +1541,8 @@ void simple_xattrs_init(struct simple_xattrs *xattrs)
*
* Dynamically allocate a simple_xattrs header and initialize the
* underlying rhashtable. This is intended for consumers that want
- * rhashtable-based xattr storage.
+ * to lazily allocate xattr storage only when the first xattr is set,
+ * avoiding the per-inode rhashtable overhead when no xattrs are used.
*
* Return: On success a new simple_xattrs is returned. On failure an
* ERR_PTR is returned.
@@ -1718,14 +1550,15 @@ void simple_xattrs_init(struct simple_xattrs *xattrs)
struct simple_xattrs *simple_xattrs_alloc(void)
{
struct simple_xattrs *xattrs __free(kfree) = NULL;
+ int ret;
xattrs = kzalloc(sizeof(*xattrs), GFP_KERNEL);
if (!xattrs)
return ERR_PTR(-ENOMEM);
- xattrs->use_rhashtable = true;
- if (rhashtable_init(&xattrs->ht, &simple_xattr_params))
- return ERR_PTR(-ENOMEM);
+ ret = simple_xattrs_init(xattrs);
+ if (ret)
+ return ERR_PTR(ret);
return no_free_ptr(xattrs);
}
@@ -1784,28 +1617,10 @@ static void simple_xattr_ht_free(void *ptr, void *arg)
*/
void simple_xattrs_free(struct simple_xattrs *xattrs, size_t *freed_space)
{
+ might_sleep();
+
if (freed_space)
*freed_space = 0;
-
- if (xattrs->use_rhashtable) {
- rhashtable_free_and_destroy(&xattrs->ht,
- simple_xattr_ht_free, freed_space);
- } else {
- struct rb_node *rbp;
-
- rbp = rb_first(&xattrs->rb_root);
- while (rbp) {
- struct simple_xattr *xattr;
- struct rb_node *rbp_next;
-
- rbp_next = rb_next(rbp);
- xattr = rb_entry(rbp, struct simple_xattr, rb_node);
- rb_erase(&xattr->rb_node, &xattrs->rb_root);
- if (freed_space)
- *freed_space += simple_xattr_space(xattr->name,
- xattr->size);
- simple_xattr_free(xattr);
- rbp = rbp_next;
- }
- }
+ rhashtable_free_and_destroy(&xattrs->ht, simple_xattr_ht_free,
+ freed_space);
}
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 3063ecf0004d..f60357d9f938 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -107,18 +107,10 @@ static inline const char *xattr_prefix(const struct xattr_handler *handler)
}
struct simple_xattrs {
- bool use_rhashtable;
- union {
- struct {
- struct rb_root rb_root;
- rwlock_t lock;
- };
- struct rhashtable ht;
- };
+ struct rhashtable ht;
};
struct simple_xattr {
- struct rb_node rb_node;
struct rhash_head hash_node;
struct rcu_head rcu;
char *name;
@@ -126,7 +118,7 @@ struct simple_xattr {
char value[];
};
-void simple_xattrs_init(struct simple_xattrs *xattrs);
+int simple_xattrs_init(struct simple_xattrs *xattrs);
struct simple_xattrs *simple_xattrs_alloc(void);
struct simple_xattrs *simple_xattrs_lazy_alloc(struct simple_xattrs **xattrsp,
const void *value, int flags);
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:02 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Stop repeating the ?: in multiple places and use a simple helper for
this.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/xattr.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/fs/xattr.c b/fs/xattr.c
index 64803097e1dc..c4db8663c32e 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -106,6 +106,13 @@ int may_write_xattr(struct mnt_idmap *idmap, struct inode *inode)
return 0;
}
+static inline int xattr_permission_error(int mask)
+{
+ if (mask & MAY_WRITE)
+ return -EPERM;
+ return -ENODATA;
+}
+
/*
* Check permissions for extended attribute access. This is a bit complicated
* because different namespaces have very different rules.
@@ -135,7 +142,7 @@ xattr_permission(struct mnt_idmap *idmap, struct inode *inode,
*/
if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) {
if (!capable(CAP_SYS_ADMIN))
- return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
+ return xattr_permission_error(mask);
return 0;
}
@@ -146,7 +153,7 @@ xattr_permission(struct mnt_idmap *idmap, struct inode *inode,
*/
if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return (mask & MAY_WRITE) ? -EPERM : -ENODATA;
+ return xattr_permission_error(mask);
if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
(mask & MAY_WRITE) &&
!inode_owner_or_capable(idmap, inode))
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:03 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Simplify the codeflow by using a switch statement that switches on
S_IFMT.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/xattr.c | 18 +++++++++++++-----
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/fs/xattr.c b/fs/xattr.c
index c4db8663c32e..328ed7558dfc 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -152,12 +152,20 @@ xattr_permission(struct mnt_idmap *idmap, struct inode *inode,
* privileged users can write attributes.
*/
if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
- return xattr_permission_error(mask);
- if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
- (mask & MAY_WRITE) &&
- !inode_owner_or_capable(idmap, inode))
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ break;
+ case S_IFDIR:
+ if (!(inode->i_mode & S_ISVTX))
+ break;
+ if (!(mask & MAY_WRITE))
+ break;
+ if (inode_owner_or_capable(idmap, inode))
+ break;
return -EPERM;
+ default:
+ return xattr_permission_error(mask);
+ }
}
return inode_permission(idmap, inode, mask);
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:04 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/kernfs/inode.c | 75 ++-------------------------------------------
fs/kernfs/kernfs-internal.h | 3 +-
fs/xattr.c | 65 +++++++++++++++++++++++++++++++++++++++
include/linux/kernfs.h | 2 --
include/linux/xattr.h | 18 +++++++++++
5 files changed, 87 insertions(+), 76 deletions(-)
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index dfc3315b5afc..1de10500842d 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -45,8 +45,7 @@ static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, bool alloc)
ret->ia_mtime = ret->ia_atime;
ret->ia_ctime = ret->ia_atime;
- atomic_set(&ret->nr_user_xattrs, 0);
- atomic_set(&ret->user_xattr_size, 0);
+ simple_xattr_limits_init(&ret->xattr_limits);
/* If someone raced us, recognize it. */
if (!try_cmpxchg(&kn->iattr, &attr, ret))
@@ -355,69 +354,6 @@ static int kernfs_vfs_xattr_set(const struct xattr_handler *handler,
return kernfs_xattr_set(kn, name, value, size, flags);
}
-static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
- const char *full_name,
- struct simple_xattrs *xattrs,
- const void *value, size_t size, int flags)
-{
- struct kernfs_iattrs *attr = kernfs_iattrs_noalloc(kn);
- atomic_t *sz = &attr->user_xattr_size;
- atomic_t *nr = &attr->nr_user_xattrs;
- struct simple_xattr *old_xattr;
- int ret;
-
- if (atomic_inc_return(nr) > KERNFS_MAX_USER_XATTRS) {
- ret = -ENOSPC;
- goto dec_count_out;
- }
-
- if (atomic_add_return(size, sz) > KERNFS_USER_XATTR_SIZE_LIMIT) {
- ret = -ENOSPC;
- goto dec_size_out;
- }
-
- old_xattr = simple_xattr_set(xattrs, full_name, value, size, flags);
- if (!old_xattr)
- return 0;
-
- if (IS_ERR(old_xattr)) {
- ret = PTR_ERR(old_xattr);
- goto dec_size_out;
- }
-
- ret = 0;
- size = old_xattr->size;
- simple_xattr_free_rcu(old_xattr);
-dec_size_out:
- atomic_sub(size, sz);
-dec_count_out:
- atomic_dec(nr);
- return ret;
-}
-
-static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
- const char *full_name,
- struct simple_xattrs *xattrs,
- const void *value, size_t size, int flags)
-{
- struct kernfs_iattrs *attr = kernfs_iattrs_noalloc(kn);
- atomic_t *sz = &attr->user_xattr_size;
- atomic_t *nr = &attr->nr_user_xattrs;
- struct simple_xattr *old_xattr;
-
- old_xattr = simple_xattr_set(xattrs, full_name, value, size, flags);
- if (!old_xattr)
- return 0;
-
- if (IS_ERR(old_xattr))
- return PTR_ERR(old_xattr);
-
- atomic_sub(old_xattr->size, sz);
- atomic_dec(nr);
- simple_xattr_free_rcu(old_xattr);
- return 0;
-}
-
static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
struct mnt_idmap *idmap,
struct dentry *unused, struct inode *inode,
@@ -440,13 +376,8 @@ static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler,
if (IS_ERR_OR_NULL(xattrs))
return PTR_ERR(xattrs);
- if (value)
- return kernfs_vfs_user_xattr_add(kn, full_name, xattrs,
- value, size, flags);
- else
- return kernfs_vfs_user_xattr_rm(kn, full_name, xattrs,
- value, size, flags);
-
+ return simple_xattr_set_limited(xattrs, &attrs->xattr_limits,
+ full_name, value, size, flags);
}
static const struct xattr_handler kernfs_trusted_xattr_handler = {
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index 1324ed8c0661..1d3831e3a270 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -27,8 +27,7 @@ struct kernfs_iattrs {
struct timespec64 ia_ctime;
struct simple_xattrs *xattrs;
- atomic_t nr_user_xattrs;
- atomic_t user_xattr_size;
+ struct simple_xattr_limits xattr_limits;
};
struct kernfs_root {
diff --git a/fs/xattr.c b/fs/xattr.c
index 328ed7558dfc..5e559b1c651f 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -1427,6 +1427,71 @@ struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
return old_xattr;
}
+static inline void simple_xattr_limits_dec(struct simple_xattr_limits *limits,
+ size_t size)
+{
+ atomic_sub(size, &limits->xattr_size);
+ atomic_dec(&limits->nr_xattrs);
+}
+
+static inline int simple_xattr_limits_inc(struct simple_xattr_limits *limits,
+ size_t size)
+{
+ if (atomic_inc_return(&limits->nr_xattrs) > SIMPLE_XATTR_MAX_NR) {
+ atomic_dec(&limits->nr_xattrs);
+ return -ENOSPC;
+ }
+
+ if (atomic_add_return(size, &limits->xattr_size) <= SIMPLE_XATTR_MAX_SIZE)
+ return 0;
+
+ simple_xattr_limits_dec(limits, size);
+ return -ENOSPC;
+}
+
+/**
+ * simple_xattr_set_limited - set an xattr with per-inode user.* limits
+ * @xattrs: the header of the xattr object
+ * @limits: per-inode limit counters for user.* xattrs
+ * @name: the name of the xattr to set or remove
+ * @value: the value to store (NULL to remove)
+ * @size: the size of @value
+ * @flags: XATTR_CREATE, XATTR_REPLACE, or 0
+ *
+ * Like simple_xattr_set(), but enforces per-inode count and total value size
+ * limits for user.* xattrs. Uses speculative pre-increment of the atomic
+ * counters to avoid races without requiring external locks.
+ *
+ * Return: On success zero is returned. On failure a negative error code is
+ * returned.
+ */
+int simple_xattr_set_limited(struct simple_xattrs *xattrs,
+ struct simple_xattr_limits *limits,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct simple_xattr *old_xattr;
+ int ret;
+
+ if (value) {
+ ret = simple_xattr_limits_inc(limits, size);
+ if (ret)
+ return ret;
+ }
+
+ old_xattr = simple_xattr_set(xattrs, name, value, size, flags);
+ if (IS_ERR(old_xattr)) {
+ if (value)
+ simple_xattr_limits_dec(limits, size);
+ return PTR_ERR(old_xattr);
+ }
+ if (old_xattr) {
+ simple_xattr_limits_dec(limits, old_xattr->size);
+ simple_xattr_free_rcu(old_xattr);
+ }
+ return 0;
+}
+
static bool xattr_is_trusted(const char *name)
{
return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index b5a5f32fdfd1..d8f57f0af5e4 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -99,8 +99,6 @@ enum kernfs_node_type {
#define KERNFS_TYPE_MASK 0x000f
#define KERNFS_FLAG_MASK ~KERNFS_TYPE_MASK
-#define KERNFS_MAX_USER_XATTRS 128
-#define KERNFS_USER_XATTR_SIZE_LIMIT (128 << 10)
enum kernfs_node_flag {
KERNFS_ACTIVATED = 0x0010,
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index f60357d9f938..90a43a117127 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -118,6 +118,20 @@ struct simple_xattr {
char value[];
};
+#define SIMPLE_XATTR_MAX_NR 128
+#define SIMPLE_XATTR_MAX_SIZE (128 << 10)
+
+struct simple_xattr_limits {
+ atomic_t nr_xattrs; /* current user.* xattr count */
+ atomic_t xattr_size; /* current total user.* value bytes */
+};
+
+static inline void simple_xattr_limits_init(struct simple_xattr_limits *limits)
+{
+ atomic_set(&limits->nr_xattrs, 0);
+ atomic_set(&limits->xattr_size, 0);
+}
+
int simple_xattrs_init(struct simple_xattrs *xattrs);
struct simple_xattrs *simple_xattrs_alloc(void);
struct simple_xattrs *simple_xattrs_lazy_alloc(struct simple_xattrs **xattrsp,
@@ -132,6 +146,10 @@ int simple_xattr_get(struct simple_xattrs *xattrs, const char *name,
struct simple_xattr *simple_xattr_set(struct simple_xattrs *xattrs,
const char *name, const void *value,
size_t size, int flags);
+int simple_xattr_set_limited(struct simple_xattrs *xattrs,
+ struct simple_xattr_limits *limits,
+ const char *name, const void *value,
+ size_t size, int flags);
ssize_t simple_xattr_list(struct inode *inode, struct simple_xattrs *xattrs,
char *buffer, size_t size);
int simple_xattr_add(struct simple_xattrs *xattrs,
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:05 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Now that we've generalized the infrastructure for user.* xattrs make it
possible to set up to 128 user.* extended attributes on a sockfs inode
or up to 128kib. kernfs (cgroupfs) has the same limits and it has proven
to be quite sufficient for nearly all use-cases.
This will allow containers to label sockets and will e.g., be used by
systemd and Gnome to find various sockets in containers where
high-privilege or more complicated solutions aren't available.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
net/socket.c | 119 +++++++++++++++++++++++++++++++++++++++++++++--------------
1 file changed, 92 insertions(+), 27 deletions(-)
diff --git a/net/socket.c b/net/socket.c
index 136b98c54fb3..7aa94fce7a8b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -315,45 +315,70 @@ static int move_addr_to_user(struct sockaddr_storage *kaddr, int klen,
static struct kmem_cache *sock_inode_cachep __ro_after_init;
+struct sockfs_inode {
+ struct simple_xattrs *xattrs;
+ struct simple_xattr_limits xattr_limits;
+ struct socket_alloc;
+};
+
+static struct sockfs_inode *SOCKFS_I(struct inode *inode)
+{
+ return container_of(inode, struct sockfs_inode, vfs_inode);
+}
+
static struct inode *sock_alloc_inode(struct super_block *sb)
{
- struct socket_alloc *ei;
+ struct sockfs_inode *si;
- ei = alloc_inode_sb(sb, sock_inode_cachep, GFP_KERNEL);
- if (!ei)
+ si = alloc_inode_sb(sb, sock_inode_cachep, GFP_KERNEL);
+ if (!si)
return NULL;
- init_waitqueue_head(&ei->socket.wq.wait);
- ei->socket.wq.fasync_list = NULL;
- ei->socket.wq.flags = 0;
+ si->xattrs = NULL;
+ simple_xattr_limits_init(&si->xattr_limits);
+
+ init_waitqueue_head(&si->socket.wq.wait);
+ si->socket.wq.fasync_list = NULL;
+ si->socket.wq.flags = 0;
+
+ si->socket.state = SS_UNCONNECTED;
+ si->socket.flags = 0;
+ si->socket.ops = NULL;
+ si->socket.sk = NULL;
+ si->socket.file = NULL;
- ei->socket.state = SS_UNCONNECTED;
- ei->socket.flags = 0;
- ei->socket.ops = NULL;
- ei->socket.sk = NULL;
- ei->socket.file = NULL;
+ return &si->vfs_inode;
+}
+
+static void sock_evict_inode(struct inode *inode)
+{
+ struct sockfs_inode *si = SOCKFS_I(inode);
+ struct simple_xattrs *xattrs = si->xattrs;
- return &ei->vfs_inode;
+ if (xattrs) {
+ simple_xattrs_free(xattrs, NULL);
+ kfree(xattrs);
+ }
+ clear_inode(inode);
}
static void sock_free_inode(struct inode *inode)
{
- struct socket_alloc *ei;
+ struct sockfs_inode *si = SOCKFS_I(inode);
- ei = container_of(inode, struct socket_alloc, vfs_inode);
- kmem_cache_free(sock_inode_cachep, ei);
+ kmem_cache_free(sock_inode_cachep, si);
}
static void init_once(void *foo)
{
- struct socket_alloc *ei = (struct socket_alloc *)foo;
+ struct sockfs_inode *si = (struct sockfs_inode *)foo;
- inode_init_once(&ei->vfs_inode);
+ inode_init_once(&si->vfs_inode);
}
static void init_inodecache(void)
{
sock_inode_cachep = kmem_cache_create("sock_inode_cache",
- sizeof(struct socket_alloc),
+ sizeof(struct sockfs_inode),
0,
(SLAB_HWCACHE_ALIGN |
SLAB_RECLAIM_ACCOUNT |
@@ -365,6 +390,7 @@ static void init_inodecache(void)
static const struct super_operations sockfs_ops = {
.alloc_inode = sock_alloc_inode,
.free_inode = sock_free_inode,
+ .evict_inode = sock_evict_inode,
.statfs = simple_statfs,
};
@@ -417,9 +443,48 @@ static const struct xattr_handler sockfs_security_xattr_handler = {
.set = sockfs_security_xattr_set,
};
+static int sockfs_user_xattr_get(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *suffix, void *value, size_t size)
+{
+ const char *name = xattr_full_name(handler, suffix);
+ struct simple_xattrs *xattrs;
+
+ xattrs = READ_ONCE(SOCKFS_I(inode)->xattrs);
+ if (!xattrs)
+ return -ENODATA;
+
+ return simple_xattr_get(xattrs, name, value, size);
+}
+
+static int sockfs_user_xattr_set(const struct xattr_handler *handler,
+ struct mnt_idmap *idmap,
+ struct dentry *dentry, struct inode *inode,
+ const char *suffix, const void *value,
+ size_t size, int flags)
+{
+ const char *name = xattr_full_name(handler, suffix);
+ struct sockfs_inode *si = SOCKFS_I(inode);
+ struct simple_xattrs *xattrs;
+
+ xattrs = simple_xattrs_lazy_alloc(&si->xattrs, value, flags);
+ if (IS_ERR_OR_NULL(xattrs))
+ return PTR_ERR(xattrs);
+
+ return simple_xattr_set_limited(xattrs, &si->xattr_limits,
+ name, value, size, flags);
+}
+
+static const struct xattr_handler sockfs_user_xattr_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .get = sockfs_user_xattr_get,
+ .set = sockfs_user_xattr_set,
+};
+
static const struct xattr_handler * const sockfs_xattr_handlers[] = {
&sockfs_xattr_handler,
&sockfs_security_xattr_handler,
+ &sockfs_user_xattr_handler,
NULL
};
@@ -572,26 +637,26 @@ EXPORT_SYMBOL(sockfd_lookup);
static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
size_t size)
{
- ssize_t len;
- ssize_t used = 0;
+ struct sockfs_inode *si = SOCKFS_I(d_inode(dentry));
+ ssize_t len, used;
- len = security_inode_listsecurity(d_inode(dentry), buffer, size);
+ len = simple_xattr_list(d_inode(dentry), READ_ONCE(si->xattrs),
+ buffer, size);
if (len < 0)
return len;
- used += len;
+
+ used = len;
if (buffer) {
- if (size < used)
- return -ERANGE;
buffer += len;
+ size -= len;
}
- len = (XATTR_NAME_SOCKPROTONAME_LEN + 1);
+ len = XATTR_NAME_SOCKPROTONAME_LEN + 1;
used += len;
if (buffer) {
- if (size < used)
+ if (size < len)
return -ERANGE;
memcpy(buffer, XATTR_NAME_SOCKPROTONAME, len);
- buffer += len;
}
return used;
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:06 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Allow user.* extended attributes on sockets by adding S_IFSOCK to the
xattr_permission() switch statement. Previously user.* xattrs were only
permitted on regular files and directories. Symlinks and special files
including sockets were rejected with -EPERM.
Path-based AF_UNIX sockets have their inodes on the underlying
filesystem (e.g. tmpfs) which already supports user.* xattrs through
simple_xattrs. So for these the permission check was the only thing
missing.
For sockets in sockfs - everything created via socket() including
abstract namespace AF_UNIX sockets - the preceding patch added
simple_xattr storage with per-inode limits. With the permission check
lifted here these sockets can now store user.* xattrs as well.
This enables services to associate metadata with their sockets. For
example, a service using Varlink for IPC can label its socket with
user.varlink=1 allowing eBPF programs to selectively capture traffic
and tools to discover IPC entrypoints by enumerating bound sockets via
netlink. Similarly, protocol negotiation can be performed through xattrs
such as indicating RFC 5424 structured syslog support on /dev/log.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
fs/xattr.c | 2 ++
1 file changed, 2 insertions(+)
diff --git a/fs/xattr.c b/fs/xattr.c
index 5e559b1c651f..09ecbaaa1660 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -163,6 +163,8 @@ xattr_permission(struct mnt_idmap *idmap, struct inode *inode,
if (inode_owner_or_capable(idmap, inode))
break;
return -EPERM;
+ case S_IFSOCK:
+ break;
default:
return xattr_permission_error(mask);
}
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:07 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Test user.* extended attribute operations on path-based Unix domain
sockets (SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET). Path-based sockets
are bound to a filesystem path and their inodes live on the underlying
filesystem (e.g. tmpfs).
Covers set/get/list/remove, persistence, XATTR_CREATE/XATTR_REPLACE
flags, empty values, size queries, buffer-too-small errors, O_PATH fd
operations, and trusted.* xattr handling.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
.../testing/selftests/filesystems/xattr/.gitignore | 1 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
3 files changed, 477 insertions(+)
diff --git a/tools/testing/selftests/filesystems/xattr/.gitignore b/tools/testing/selftests/filesystems/xattr/.gitignore
new file mode 100644
index 000000000000..5fd015d2257a
--- /dev/null
+++ b/tools/testing/selftests/filesystems/xattr/.gitignore
@@ -0,0 +1 @@
+xattr_socket_test
diff --git a/tools/testing/selftests/filesystems/xattr/Makefile b/tools/testing/selftests/filesystems/xattr/Makefile
new file mode 100644
index 000000000000..e3d8dca80faa
--- /dev/null
+++ b/tools/testing/selftests/filesystems/xattr/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+CFLAGS += $(KHDR_INCLUDES)
+TEST_GEN_PROGS := xattr_socket_test
+
+include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/xattr/xattr_socket_test.c b/tools/testing/selftests/filesystems/xattr/xattr_socket_test.c
new file mode 100644
index 000000000000..fac0a4c6bc05
--- /dev/null
+++ b/tools/testing/selftests/filesystems/xattr/xattr_socket_test.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2026 Christian Brauner <brauner@kernel.org>
+/*
+ * Test extended attributes on path-based Unix domain sockets.
+ *
+ * Path-based Unix domain sockets are bound to a filesystem path and their
+ * inodes live on the underlying filesystem (e.g. tmpfs). These tests verify
+ * that user.* and trusted.* xattr operations work correctly on them using
+ * path-based syscalls (setxattr, getxattr, etc.).
+ *
+ * Covers SOCK_STREAM, SOCK_DGRAM, and SOCK_SEQPACKET socket types.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/xattr.h>
+#include <unistd.h>
+
+#include "../../kselftest_harness.h"
+
+#define TEST_XATTR_NAME "user.testattr"
+#define TEST_XATTR_VALUE "testvalue"
+#define TEST_XATTR_VALUE2 "newvalue"
+
+/*
+ * Fixture for path-based Unix domain socket tests.
+ * Creates a SOCK_STREAM socket bound to a path in /tmp (typically tmpfs).
+ */
+FIXTURE(xattr_socket)
+{
+ char socket_path[PATH_MAX];
+ int sockfd;
+};
+
+FIXTURE_VARIANT(xattr_socket)
+{
+ int sock_type;
+ const char *name;
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket, stream) {
+ .sock_type = SOCK_STREAM,
+ .name = "stream",
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket, dgram) {
+ .sock_type = SOCK_DGRAM,
+ .name = "dgram",
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket, seqpacket) {
+ .sock_type = SOCK_SEQPACKET,
+ .name = "seqpacket",
+};
+
+FIXTURE_SETUP(xattr_socket)
+{
+ struct sockaddr_un addr;
+ int ret;
+
+ self->sockfd = -1;
+
+ snprintf(self->socket_path, sizeof(self->socket_path),
+ "/tmp/xattr_socket_test_%s.%d", variant->name, getpid());
+ unlink(self->socket_path);
+
+ self->sockfd = socket(AF_UNIX, variant->sock_type, 0);
+ ASSERT_GE(self->sockfd, 0) {
+ TH_LOG("Failed to create socket: %s", strerror(errno));
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strncpy(addr.sun_path, self->socket_path, sizeof(addr.sun_path) - 1);
+
+ ret = bind(self->sockfd, (struct sockaddr *)&addr, sizeof(addr));
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("Failed to bind socket to %s: %s",
+ self->socket_path, strerror(errno));
+ }
+}
+
+FIXTURE_TEARDOWN(xattr_socket)
+{
+ if (self->sockfd >= 0)
+ close(self->sockfd);
+ unlink(self->socket_path);
+}
+
+TEST_F(xattr_socket, set_user_xattr)
+{
+ int ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("setxattr failed: %s (errno=%d)", strerror(errno), errno);
+ }
+}
+
+TEST_F(xattr_socket, get_user_xattr)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("setxattr failed: %s", strerror(errno));
+ }
+
+ memset(buf, 0, sizeof(buf));
+ ret = getxattr(self->socket_path, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE)) {
+ TH_LOG("getxattr returned %zd, expected %zu: %s",
+ ret, strlen(TEST_XATTR_VALUE), strerror(errno));
+ }
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE);
+}
+
+TEST_F(xattr_socket, list_user_xattr)
+{
+ char list[1024];
+ ssize_t ret;
+ bool found = false;
+ char *ptr;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("setxattr failed: %s", strerror(errno));
+ }
+
+ memset(list, 0, sizeof(list));
+ ret = listxattr(self->socket_path, list, sizeof(list));
+ ASSERT_GT(ret, 0) {
+ TH_LOG("listxattr failed: %s", strerror(errno));
+ }
+
+ for (ptr = list; ptr < list + ret; ptr += strlen(ptr) + 1) {
+ if (strcmp(ptr, TEST_XATTR_NAME) == 0) {
+ found = true;
+ break;
+ }
+ }
+ ASSERT_TRUE(found) {
+ TH_LOG("xattr %s not found in list", TEST_XATTR_NAME);
+ }
+}
+
+TEST_F(xattr_socket, remove_user_xattr)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("setxattr failed: %s", strerror(errno));
+ }
+
+ ret = removexattr(self->socket_path, TEST_XATTR_NAME);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("removexattr failed: %s", strerror(errno));
+ }
+
+ ret = getxattr(self->socket_path, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA) {
+ TH_LOG("Expected ENODATA, got %s", strerror(errno));
+ }
+}
+
+/*
+ * Test that xattrs persist across socket close and reopen.
+ * The xattr is on the filesystem inode, not the socket fd.
+ */
+TEST_F(xattr_socket, xattr_persistence)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("setxattr failed: %s", strerror(errno));
+ }
+
+ close(self->sockfd);
+ self->sockfd = -1;
+
+ memset(buf, 0, sizeof(buf));
+ ret = getxattr(self->socket_path, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE)) {
+ TH_LOG("getxattr after close failed: %s", strerror(errno));
+ }
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE);
+}
+
+TEST_F(xattr_socket, update_user_xattr)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE2, strlen(TEST_XATTR_VALUE2), 0);
+ ASSERT_EQ(ret, 0);
+
+ memset(buf, 0, sizeof(buf));
+ ret = getxattr(self->socket_path, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE2));
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE2);
+}
+
+TEST_F(xattr_socket, xattr_create_flag)
+{
+ int ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE2, strlen(TEST_XATTR_VALUE2), XATTR_CREATE);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EEXIST);
+}
+
+TEST_F(xattr_socket, xattr_replace_flag)
+{
+ int ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), XATTR_REPLACE);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+}
+
+TEST_F(xattr_socket, multiple_xattrs)
+{
+ char buf[256];
+ ssize_t ret;
+ int i;
+ char name[64], value[64];
+ const int num_xattrs = 5;
+
+ for (i = 0; i < num_xattrs; i++) {
+ snprintf(name, sizeof(name), "user.test%d", i);
+ snprintf(value, sizeof(value), "value%d", i);
+ ret = setxattr(self->socket_path, name, value, strlen(value), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("setxattr %s failed: %s", name, strerror(errno));
+ }
+ }
+
+ for (i = 0; i < num_xattrs; i++) {
+ snprintf(name, sizeof(name), "user.test%d", i);
+ snprintf(value, sizeof(value), "value%d", i);
+ memset(buf, 0, sizeof(buf));
+ ret = getxattr(self->socket_path, name, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(value));
+ ASSERT_STREQ(buf, value);
+ }
+}
+
+TEST_F(xattr_socket, xattr_empty_value)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME, "", 0, 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = getxattr(self->socket_path, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, 0);
+}
+
+TEST_F(xattr_socket, xattr_get_size)
+{
+ ssize_t ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = getxattr(self->socket_path, TEST_XATTR_NAME, NULL, 0);
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE));
+}
+
+TEST_F(xattr_socket, xattr_buffer_too_small)
+{
+ char buf[2];
+ ssize_t ret;
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = getxattr(self->socket_path, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ERANGE);
+}
+
+TEST_F(xattr_socket, xattr_nonexistent)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = getxattr(self->socket_path, "user.nonexistent", buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+}
+
+TEST_F(xattr_socket, remove_nonexistent_xattr)
+{
+ int ret;
+
+ ret = removexattr(self->socket_path, "user.nonexistent");
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+}
+
+TEST_F(xattr_socket, large_xattr_value)
+{
+ char large_value[4096];
+ char read_buf[4096];
+ ssize_t ret;
+
+ memset(large_value, 'A', sizeof(large_value));
+
+ ret = setxattr(self->socket_path, TEST_XATTR_NAME,
+ large_value, sizeof(large_value), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("setxattr with large value failed: %s", strerror(errno));
+ }
+
+ memset(read_buf, 0, sizeof(read_buf));
+ ret = getxattr(self->socket_path, TEST_XATTR_NAME,
+ read_buf, sizeof(read_buf));
+ ASSERT_EQ(ret, (ssize_t)sizeof(large_value));
+ ASSERT_EQ(memcmp(large_value, read_buf, sizeof(large_value)), 0);
+}
+
+/*
+ * Test lsetxattr/lgetxattr (don't follow symlinks).
+ * Socket files aren't symlinks, so this should work the same.
+ */
+TEST_F(xattr_socket, lsetxattr_lgetxattr)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = lsetxattr(self->socket_path, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("lsetxattr failed: %s", strerror(errno));
+ }
+
+ memset(buf, 0, sizeof(buf));
+ ret = lgetxattr(self->socket_path, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE));
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE);
+}
+
+/*
+ * Fixture for trusted.* xattr tests.
+ * These require CAP_SYS_ADMIN.
+ */
+FIXTURE(xattr_socket_trusted)
+{
+ char socket_path[PATH_MAX];
+ int sockfd;
+};
+
+FIXTURE_VARIANT(xattr_socket_trusted)
+{
+ int sock_type;
+ const char *name;
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket_trusted, stream) {
+ .sock_type = SOCK_STREAM,
+ .name = "stream",
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket_trusted, dgram) {
+ .sock_type = SOCK_DGRAM,
+ .name = "dgram",
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket_trusted, seqpacket) {
+ .sock_type = SOCK_SEQPACKET,
+ .name = "seqpacket",
+};
+
+FIXTURE_SETUP(xattr_socket_trusted)
+{
+ struct sockaddr_un addr;
+ int ret;
+
+ self->sockfd = -1;
+
+ snprintf(self->socket_path, sizeof(self->socket_path),
+ "/tmp/xattr_socket_trusted_%s.%d", variant->name, getpid());
+ unlink(self->socket_path);
+
+ self->sockfd = socket(AF_UNIX, variant->sock_type, 0);
+ ASSERT_GE(self->sockfd, 0);
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ strncpy(addr.sun_path, self->socket_path, sizeof(addr.sun_path) - 1);
+
+ ret = bind(self->sockfd, (struct sockaddr *)&addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+}
+
+FIXTURE_TEARDOWN(xattr_socket_trusted)
+{
+ if (self->sockfd >= 0)
+ close(self->sockfd);
+ unlink(self->socket_path);
+}
+
+TEST_F(xattr_socket_trusted, set_trusted_xattr)
+{
+ char buf[256];
+ ssize_t len;
+ int ret;
+
+ ret = setxattr(self->socket_path, "trusted.testattr",
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ if (ret == -1 && errno == EPERM)
+ SKIP(return, "Need CAP_SYS_ADMIN for trusted.* xattrs");
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("setxattr trusted.testattr failed: %s", strerror(errno));
+ }
+
+ memset(buf, 0, sizeof(buf));
+ len = getxattr(self->socket_path, "trusted.testattr",
+ buf, sizeof(buf));
+ ASSERT_EQ(len, (ssize_t)strlen(TEST_XATTR_VALUE));
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE);
+}
+
+TEST_F(xattr_socket_trusted, get_trusted_xattr_unprivileged)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = getxattr(self->socket_path, "trusted.testattr", buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_TRUE(errno == ENODATA || errno == EPERM) {
+ TH_LOG("Expected ENODATA or EPERM, got %s", strerror(errno));
+ }
+}
+
+TEST_HARNESS_MAIN
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:08 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Test user.* extended attribute operations on sockfs sockets. Sockets
created via socket() have their inodes in sockfs, which now supports
user.* xattrs with per-inode limits.
Tests fsetxattr/fgetxattr/flistxattr/fremovexattr operations including
set/get, listing (verifies system.sockprotoname presence), remove,
update, XATTR_CREATE/XATTR_REPLACE flags, empty values, size queries,
and buffer-too-small errors.
Also tests per-inode limit enforcement: maximum 128 xattrs, maximum
128KB total value size, limit recovery after removal, and independent
limits across different sockets.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
.../testing/selftests/filesystems/xattr/.gitignore | 1 +
tools/testing/selftests/filesystems/xattr/Makefile | 2 +-
.../filesystems/xattr/xattr_sockfs_test.c | 363 +++++++++++++++++++++
3 files changed, 365 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/filesystems/xattr/.gitignore b/tools/testing/selftests/filesystems/xattr/.gitignore
index 5fd015d2257a..00a59c89efab 100644
--- a/tools/testing/selftests/filesystems/xattr/.gitignore
+++ b/tools/testing/selftests/filesystems/xattr/.gitignore
@@ -1 +1,2 @@
xattr_socket_test
+xattr_sockfs_test
diff --git a/tools/testing/selftests/filesystems/xattr/Makefile b/tools/testing/selftests/filesystems/xattr/Makefile
index e3d8dca80faa..2cd722dba47b 100644
--- a/tools/testing/selftests/filesystems/xattr/Makefile
+++ b/tools/testing/selftests/filesystems/xattr/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += $(KHDR_INCLUDES)
-TEST_GEN_PROGS := xattr_socket_test
+TEST_GEN_PROGS := xattr_socket_test xattr_sockfs_test
include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/xattr/xattr_sockfs_test.c b/tools/testing/selftests/filesystems/xattr/xattr_sockfs_test.c
new file mode 100644
index 000000000000..b4824b01a86d
--- /dev/null
+++ b/tools/testing/selftests/filesystems/xattr/xattr_sockfs_test.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2026 Christian Brauner <brauner@kernel.org>
+/*
+ * Test extended attributes on sockfs sockets.
+ *
+ * Sockets created via socket() have their inodes in sockfs, which supports
+ * user.* xattrs with per-inode limits: up to 128 xattrs and 128KB total
+ * value size. These tests verify xattr operations via fsetxattr/fgetxattr/
+ * flistxattr/fremovexattr on the socket fd, as well as limit enforcement.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/xattr.h>
+#include <unistd.h>
+
+#include "../../kselftest_harness.h"
+
+#define TEST_XATTR_NAME "user.testattr"
+#define TEST_XATTR_VALUE "testvalue"
+#define TEST_XATTR_VALUE2 "newvalue"
+
+/* Per-inode limits for user.* xattrs on sockfs (from include/linux/xattr.h) */
+#define SIMPLE_XATTR_MAX_NR 128
+#define SIMPLE_XATTR_MAX_SIZE (128 << 10) /* 128 KB */
+
+#ifndef XATTR_SIZE_MAX
+#define XATTR_SIZE_MAX 65536
+#endif
+
+/*
+ * Fixture for sockfs socket xattr tests.
+ * Creates an AF_UNIX socket (lives in sockfs, not bound to any path).
+ */
+FIXTURE(xattr_sockfs)
+{
+ int sockfd;
+};
+
+FIXTURE_SETUP(xattr_sockfs)
+{
+ self->sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_GE(self->sockfd, 0) {
+ TH_LOG("Failed to create socket: %s", strerror(errno));
+ }
+}
+
+FIXTURE_TEARDOWN(xattr_sockfs)
+{
+ if (self->sockfd >= 0)
+ close(self->sockfd);
+}
+
+TEST_F(xattr_sockfs, set_get_user_xattr)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("fsetxattr failed: %s", strerror(errno));
+ }
+
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE)) {
+ TH_LOG("fgetxattr returned %zd: %s", ret, strerror(errno));
+ }
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE);
+}
+
+/*
+ * Test listing xattrs on a sockfs socket.
+ * Should include user.* xattrs and system.sockprotoname.
+ */
+TEST_F(xattr_sockfs, list_user_xattr)
+{
+ char list[4096];
+ ssize_t ret;
+ char *ptr;
+ bool found_user = false;
+ bool found_proto = false;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("fsetxattr failed: %s", strerror(errno));
+ }
+
+ memset(list, 0, sizeof(list));
+ ret = flistxattr(self->sockfd, list, sizeof(list));
+ ASSERT_GT(ret, 0) {
+ TH_LOG("flistxattr failed: %s", strerror(errno));
+ }
+
+ for (ptr = list; ptr < list + ret; ptr += strlen(ptr) + 1) {
+ if (strcmp(ptr, TEST_XATTR_NAME) == 0)
+ found_user = true;
+ if (strcmp(ptr, "system.sockprotoname") == 0)
+ found_proto = true;
+ }
+ ASSERT_TRUE(found_user) {
+ TH_LOG("user xattr not found in list");
+ }
+ ASSERT_TRUE(found_proto) {
+ TH_LOG("system.sockprotoname not found in list");
+ }
+}
+
+TEST_F(xattr_sockfs, remove_user_xattr)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = fremovexattr(self->sockfd, TEST_XATTR_NAME);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("fremovexattr failed: %s", strerror(errno));
+ }
+
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+}
+
+TEST_F(xattr_sockfs, update_user_xattr)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE2, strlen(TEST_XATTR_VALUE2), 0);
+ ASSERT_EQ(ret, 0);
+
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE2));
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE2);
+}
+
+TEST_F(xattr_sockfs, xattr_create_flag)
+{
+ int ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE2, strlen(TEST_XATTR_VALUE2),
+ XATTR_CREATE);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, EEXIST);
+}
+
+TEST_F(xattr_sockfs, xattr_replace_flag)
+{
+ int ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE),
+ XATTR_REPLACE);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+}
+
+TEST_F(xattr_sockfs, get_nonexistent)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = fgetxattr(self->sockfd, "user.nonexistent", buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+}
+
+TEST_F(xattr_sockfs, empty_value)
+{
+ ssize_t ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME, "", 0, 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, NULL, 0);
+ ASSERT_EQ(ret, 0);
+}
+
+TEST_F(xattr_sockfs, get_size)
+{
+ ssize_t ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, NULL, 0);
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE));
+}
+
+TEST_F(xattr_sockfs, buffer_too_small)
+{
+ char buf[2];
+ ssize_t ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ERANGE);
+}
+
+/*
+ * Test maximum number of user.* xattrs per socket.
+ * The kernel enforces SIMPLE_XATTR_MAX_NR (128), so the 129th should
+ * fail with ENOSPC.
+ */
+TEST_F(xattr_sockfs, max_nr_xattrs)
+{
+ char name[32];
+ int i, ret;
+
+ for (i = 0; i < SIMPLE_XATTR_MAX_NR; i++) {
+ snprintf(name, sizeof(name), "user.test%03d", i);
+ ret = fsetxattr(self->sockfd, name, "v", 1, 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("fsetxattr %s failed at i=%d: %s",
+ name, i, strerror(errno));
+ }
+ }
+
+ ret = fsetxattr(self->sockfd, "user.overflow", "v", 1, 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENOSPC) {
+ TH_LOG("Expected ENOSPC for xattr %d, got %s",
+ SIMPLE_XATTR_MAX_NR + 1, strerror(errno));
+ }
+}
+
+/*
+ * Test maximum total value size for user.* xattrs.
+ * The kernel enforces SIMPLE_XATTR_MAX_SIZE (128KB). Individual xattr
+ * values are limited to XATTR_SIZE_MAX (64KB) by the VFS, so we need
+ * at least two xattrs to hit the total limit.
+ */
+TEST_F(xattr_sockfs, max_xattr_size)
+{
+ char *value;
+ int ret;
+
+ value = malloc(XATTR_SIZE_MAX);
+ ASSERT_NE(value, NULL);
+ memset(value, 'A', XATTR_SIZE_MAX);
+
+ /* First 64KB xattr - total = 64KB */
+ ret = fsetxattr(self->sockfd, "user.big1", value, XATTR_SIZE_MAX, 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("first large xattr failed: %s", strerror(errno));
+ }
+
+ /* Second 64KB xattr - total = 128KB (exactly at limit) */
+ ret = fsetxattr(self->sockfd, "user.big2", value, XATTR_SIZE_MAX, 0);
+ free(value);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("second large xattr failed: %s", strerror(errno));
+ }
+
+ /* Third xattr with 1 byte - total > 128KB, should fail */
+ ret = fsetxattr(self->sockfd, "user.big3", "v", 1, 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENOSPC) {
+ TH_LOG("Expected ENOSPC when exceeding size limit, got %s",
+ strerror(errno));
+ }
+}
+
+/*
+ * Test that removing an xattr frees limit space, allowing re-addition.
+ */
+TEST_F(xattr_sockfs, limit_remove_readd)
+{
+ char name[32];
+ int i, ret;
+
+ /* Fill up to the maximum count */
+ for (i = 0; i < SIMPLE_XATTR_MAX_NR; i++) {
+ snprintf(name, sizeof(name), "user.test%03d", i);
+ ret = fsetxattr(self->sockfd, name, "v", 1, 0);
+ ASSERT_EQ(ret, 0);
+ }
+
+ /* Verify we're at the limit */
+ ret = fsetxattr(self->sockfd, "user.overflow", "v", 1, 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENOSPC);
+
+ /* Remove one xattr */
+ ret = fremovexattr(self->sockfd, "user.test000");
+ ASSERT_EQ(ret, 0);
+
+ /* Now we should be able to add one more */
+ ret = fsetxattr(self->sockfd, "user.newattr", "v", 1, 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("re-add after remove failed: %s", strerror(errno));
+ }
+}
+
+/*
+ * Test that two different sockets have independent xattr limits.
+ */
+TEST_F(xattr_sockfs, limits_per_inode)
+{
+ char buf[256];
+ int sock2;
+ ssize_t ret;
+
+ sock2 = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_GE(sock2, 0);
+
+ /* Set xattr on first socket */
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0);
+
+ /* First socket's xattr should not be visible on second socket */
+ ret = fgetxattr(sock2, TEST_XATTR_NAME, NULL, 0);
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+
+ /* Second socket should independently accept xattrs */
+ ret = fsetxattr(sock2, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE2, strlen(TEST_XATTR_VALUE2), 0);
+ ASSERT_EQ(ret, 0);
+
+ /* Verify each socket has its own value */
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE));
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE);
+
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(sock2, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE2));
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE2);
+
+ close(sock2);
+}
+
+TEST_HARNESS_MAIN
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:09 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | Test user.* xattr operations on sockets from different address families:
AF_INET, AF_INET6, AF_NETLINK, and AF_PACKET. All socket types use
sockfs for their inodes, so user.* xattrs should work regardless of
address family.
Each fixture creates a socket (no bind needed) and verifies the full
fsetxattr/fgetxattr/flistxattr/fremovexattr cycle. AF_INET6 skips if
not supported; AF_PACKET skips if CAP_NET_RAW is unavailable.
Also tests abstract namespace AF_UNIX sockets, which live in sockfs
(not on a filesystem) and should support user.* xattrs.
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
.../testing/selftests/filesystems/xattr/.gitignore | 1 +
tools/testing/selftests/filesystems/xattr/Makefile | 2 +-
.../filesystems/xattr/xattr_socket_types_test.c | 177 +++++++++++++++++++++
3 files changed, 179 insertions(+), 1 deletion(-)
diff --git a/tools/testing/selftests/filesystems/xattr/.gitignore b/tools/testing/selftests/filesystems/xattr/.gitignore
index 00a59c89efab..092d14094c0f 100644
--- a/tools/testing/selftests/filesystems/xattr/.gitignore
+++ b/tools/testing/selftests/filesystems/xattr/.gitignore
@@ -1,2 +1,3 @@
xattr_socket_test
xattr_sockfs_test
+xattr_socket_types_test
diff --git a/tools/testing/selftests/filesystems/xattr/Makefile b/tools/testing/selftests/filesystems/xattr/Makefile
index 2cd722dba47b..95364ffb10e9 100644
--- a/tools/testing/selftests/filesystems/xattr/Makefile
+++ b/tools/testing/selftests/filesystems/xattr/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += $(KHDR_INCLUDES)
-TEST_GEN_PROGS := xattr_socket_test xattr_sockfs_test
+TEST_GEN_PROGS := xattr_socket_test xattr_sockfs_test xattr_socket_types_test
include ../../lib.mk
diff --git a/tools/testing/selftests/filesystems/xattr/xattr_socket_types_test.c b/tools/testing/selftests/filesystems/xattr/xattr_socket_types_test.c
new file mode 100644
index 000000000000..bfabe91b2ed1
--- /dev/null
+++ b/tools/testing/selftests/filesystems/xattr/xattr_socket_types_test.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2026 Christian Brauner <brauner@kernel.org>
+/*
+ * Test user.* xattrs on various socket families.
+ *
+ * All socket types use sockfs for their inodes, so user.* xattrs should
+ * work on any socket regardless of address family. This tests AF_INET,
+ * AF_INET6, AF_NETLINK, AF_PACKET, and abstract namespace AF_UNIX sockets.
+ */
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <sys/xattr.h>
+#include <linux/netlink.h>
+#include <unistd.h>
+
+#include "../../kselftest_harness.h"
+
+#define TEST_XATTR_NAME "user.testattr"
+#define TEST_XATTR_VALUE "testvalue"
+
+FIXTURE(xattr_socket_types)
+{
+ int sockfd;
+};
+
+FIXTURE_VARIANT(xattr_socket_types)
+{
+ int family;
+ int type;
+ int protocol;
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket_types, inet) {
+ .family = AF_INET,
+ .type = SOCK_STREAM,
+ .protocol = 0,
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket_types, inet6) {
+ .family = AF_INET6,
+ .type = SOCK_STREAM,
+ .protocol = 0,
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket_types, netlink) {
+ .family = AF_NETLINK,
+ .type = SOCK_RAW,
+ .protocol = NETLINK_USERSOCK,
+};
+
+FIXTURE_VARIANT_ADD(xattr_socket_types, packet) {
+ .family = AF_PACKET,
+ .type = SOCK_DGRAM,
+ .protocol = 0,
+};
+
+FIXTURE_SETUP(xattr_socket_types)
+{
+ self->sockfd = socket(variant->family, variant->type,
+ variant->protocol);
+ if (self->sockfd < 0 &&
+ (errno == EAFNOSUPPORT || errno == EPERM || errno == EACCES))
+ SKIP(return, "socket(%d, %d, %d) not available: %s",
+ variant->family, variant->type, variant->protocol,
+ strerror(errno));
+ ASSERT_GE(self->sockfd, 0) {
+ TH_LOG("Failed to create socket(%d, %d, %d): %s",
+ variant->family, variant->type, variant->protocol,
+ strerror(errno));
+ }
+}
+
+FIXTURE_TEARDOWN(xattr_socket_types)
+{
+ if (self->sockfd >= 0)
+ close(self->sockfd);
+}
+
+TEST_F(xattr_socket_types, set_get_list_remove)
+{
+ char buf[256], list[4096], *ptr;
+ ssize_t ret;
+ bool found;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("fsetxattr failed: %s", strerror(errno));
+ }
+
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE));
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE);
+
+ memset(list, 0, sizeof(list));
+ ret = flistxattr(self->sockfd, list, sizeof(list));
+ ASSERT_GT(ret, 0);
+ found = false;
+ for (ptr = list; ptr < list + ret; ptr += strlen(ptr) + 1) {
+ if (strcmp(ptr, TEST_XATTR_NAME) == 0)
+ found = true;
+ }
+ ASSERT_TRUE(found);
+
+ ret = fremovexattr(self->sockfd, TEST_XATTR_NAME);
+ ASSERT_EQ(ret, 0);
+
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, -1);
+ ASSERT_EQ(errno, ENODATA);
+}
+
+/*
+ * Test abstract namespace AF_UNIX socket.
+ * Abstract sockets don't have a filesystem path; their inodes live in
+ * sockfs so user.* xattrs should work via fsetxattr/fgetxattr.
+ */
+FIXTURE(xattr_abstract)
+{
+ int sockfd;
+};
+
+FIXTURE_SETUP(xattr_abstract)
+{
+ struct sockaddr_un addr;
+ char name[64];
+ int ret, len;
+
+ self->sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
+ ASSERT_GE(self->sockfd, 0);
+
+ len = snprintf(name, sizeof(name), "xattr_test_abstract_%d", getpid());
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sun_family = AF_UNIX;
+ addr.sun_path[0] = '\0';
+ memcpy(&addr.sun_path[1], name, len);
+
+ ret = bind(self->sockfd, (struct sockaddr *)&addr,
+ offsetof(struct sockaddr_un, sun_path) + 1 + len);
+ ASSERT_EQ(ret, 0);
+}
+
+FIXTURE_TEARDOWN(xattr_abstract)
+{
+ if (self->sockfd >= 0)
+ close(self->sockfd);
+}
+
+TEST_F(xattr_abstract, set_get)
+{
+ char buf[256];
+ ssize_t ret;
+
+ ret = fsetxattr(self->sockfd, TEST_XATTR_NAME,
+ TEST_XATTR_VALUE, strlen(TEST_XATTR_VALUE), 0);
+ ASSERT_EQ(ret, 0) {
+ TH_LOG("fsetxattr on abstract socket failed: %s",
+ strerror(errno));
+ }
+
+ memset(buf, 0, sizeof(buf));
+ ret = fgetxattr(self->sockfd, TEST_XATTR_NAME, buf, sizeof(buf));
+ ASSERT_EQ(ret, (ssize_t)strlen(TEST_XATTR_VALUE));
+ ASSERT_STREQ(buf, TEST_XATTR_VALUE);
+}
+
+TEST_HARNESS_MAIN
--
2.47.3 | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 16 Feb 2026 14:32:10 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon, Feb 16, 2026 at 02:31:56PM +0100, Christian Brauner wrote:
Patches 1-6 look ok to me, at least in the sense that nothing stood out
to me as obviously wrong, so
Acked-by: "Darrick J. Wong" <djwong@kernel.org>
Hum. I suppose there's never going to be a central varlink broker, is
there? That doesn't sound great for discoverability, unless the plan is
to try to concentrate them in (say) /run/varlink? But even then, could
you have N services that share the same otherwise private tmpfs in order
to talk to each other via a varlink socket? I suppose in that case, the
N services probably don't care/want others to discover their socket.
Who gets to set xattrs? Can a malicious varlink socket user who has
connect() abilities also delete user.varlink to mess with everyone who
comes afterwards?
--D | {
"author": "\"Darrick J. Wong\" <djwong@kernel.org>",
"date": "Thu, 19 Feb 2026 16:44:54 -0800",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Thu, Feb 19, 2026 at 04:44:54PM -0800, Darrick J. Wong wrote:
Varlink was explicitly designed to avoid having to have a broker.
Practically it would have been one option to have a a central registry
maintained as a bpf socket map. My naive take had always been something
like: systemd can have a global socket map. sockets are picked up
whenver the appropriate xattr is set and deleted from the map once the
socket goes away (or the xattr is unset). Right now this is something
that would require capabilities. Once signed bpf is more common it is
easy to load that on per-container basis. But...
... the future is already here :)
https://github.com/systemd/systemd/pull/40590
All public varlink services that are supposed to be announced are now
symlinked into:
/run/varlink/registry
There are of-course non-public interfaces such as the interface
between PID 1 and oomd. Such interfaces are not exposed.
It's also possible to have per user registries at e.g.:
/run/user/1000/varlink/registry/
Such varlink services can now also be listed via:
valinkctl list-services
This then ties very neatly into the varlink bridge we're currently
building:
https://github.com/mvo5/varlink-http-bridge
It takes a directory with varlink sockets (or symlinks to varlink
sockets) like /run/varlink/registry as the argument and will serve
whatever it finds in there. Sockets can be added or removed dynamically
in the dir as needed:
curl -s http://localhost:8080/sockets | jq
{
"sockets": [
"io.systemd.Login",
"io.systemd.Hostname",
"io.systemd.sysext",
"io.systemd.BootControl",
"io.systemd.Import",
"io.systemd.Repart",
"io.systemd.MuteConsole",
"io.systemd.FactoryReset",
"io.systemd.Credentials",
"io.systemd.AskPassword",
"io.systemd.Manager",
"io.systemd.ManagedOOM"
]
}
The xattrs allow to have a completely global view of such services and
the per-user sessions all have their own sub-view.
Yeah sure that's one way.
The main focus is AF_UNIX sockets of course so a varlink service does:
fd = socket(AF_UNIX)
umask(0117);
bind(fd, "/run/foobar");
umask(original_umask);
chown("/run/foobar", -1, MYACCESSGID);
setxattr("/run/foobar", "user.varlink", "1");
For non-path based sockets the inodes for client and server are
inherently distinct so they cannot interfer with each other. But even
then a chmod() + chown(-1, MYACCESSGID) on the sockfs socket fd will
protect this.
Thanks for the review. Please keep going. :) | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Fri, 20 Feb 2026 10:23:55 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon, Feb 16, 2026 at 02:32:05PM +0100, Christian Brauner wrote:
I know you're just moving code around and that looks ok, but:
I guess this means you can't have more than 128 xattrs total, and
sum(values) must be less than 128k? The fixed limit is a little odd,
but it's all pinned kernel memory, right?
(IOWs, you haven't done anything wild ala xfile.c to make it possible to
swap that out to disk?)
--D | {
"author": "\"Darrick J. Wong\" <djwong@kernel.org>",
"date": "Fri, 20 Feb 2026 16:03:26 -0800",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Fri, Feb 20, 2026 at 10:23:55AM +0100, Christian Brauner wrote:
The rest look fine too, modulo my comments about the fixed limits.
Acked-by: "Darrick J. Wong" <djwong@kernel.org>
--D | {
"author": "\"Darrick J. Wong\" <djwong@kernel.org>",
"date": "Fri, 20 Feb 2026 16:14:57 -0800",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Fri, Feb 20, 2026 at 04:03:26PM -0800, Darrick J. Wong wrote:
Yeah, it's all pinned kernel memory.
First time I've seen that. Very creative. But no, I've not done that.
cgroupfs has been fine with a fixed limit for a long time now, so for now
it's fine to assume sockfs will be as well. | {
"author": "Christian Brauner <brauner@kernel.org>",
"date": "Mon, 23 Feb 2026 13:13:13 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:31:57, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 15:43:28 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:31:58, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 15:43:41 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:31:59, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 15:48:40 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:00, Christian Brauner wrote:
...
This is a slight change in the lifetime rules because previously kernfs
xattrs could be safely accessed only under RCU but after this change you
have to hold inode reference *and* RCU to safely access them. I don't think
anybody would be accessing xattrs without holding inode reference so this
should be safe but it would be good to mention this in the changelog.
Otherwise feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:00:37 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid of rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:01, Christian Brauner wrote:
One question below:
So you bother with postponing the freeing to a scheduled work because
put_pid() can be called from a context where acquiring rcu to iterate
rhashtable would not be possible? Frankly I have hard time imagining such
context (where previous rbtree code wouldn't have issues as well), in
particular because AFAIR rcu is safe to arbitrarily nest. What am I
missing?
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:09:15 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:02, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:14:34 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:03, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:15:17 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Fri 27-02-26 16:09:15, Jan Kara wrote:
Ah, I've now found out rhashtable_free_and_destroy() can sleep and that's
likely the reason. OK. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:16:04 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:04, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:17:42 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:05, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:20:36 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:06, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:25:14 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:07, Christian Brauner wrote:
OK. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:26:07 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:08, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:29:08 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:09, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:30:53 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | Hey,
This reworks the simple_xattr infrastructure and adds support for
user.* extended attributes on sockets.
The simple_xattr subsystem currently uses an rbtree protected by a
reader-writer spinlock. This series replaces the rbtree with an
rhashtable giving O(1) average-case lookup with RCU-based lockless
reads. This sped up concurrent access patterns on tmpfs quite a bit and
it's an overall easy enough conversion to do and gets rid or rwlock_t.
The conversion is done incrementally: a new rhashtable path is added
alongside the existing rbtree, consumers are migrated one at a time
(shmem, kernfs, pidfs), and then the rbtree code is removed. All three
consumers switch from embedded structs to pointer-based lazy allocation
so the rhashtable overhead is only paid for inodes that actually use
xattrs.
With this infrastructure in place the series adds support for user.*
xattrs on sockets. Path-based AF_UNIX sockets inherit xattr support
from the underlying filesystem (e.g. tmpfs) but sockets in sockfs -
that is everything created via socket() including abstract namespace
AF_UNIX sockets - had no xattr support at all.
The xattr_permission() checks are reworked to allow user.* xattrs on
S_IFSOCK inodes. Sockfs sockets get per-inode limits of 128 xattrs and
128KB total value size matching the limits already in use for kernfs.
The practical motivation comes from several directions. systemd and
GNOME are expanding their use of Varlink as an IPC mechanism. For D-Bus
there are tools like dbus-monitor that can observe IPC traffic across
the system but this only works because D-Bus has a central broker. For
Varlink there is no broker and there is currently no way to identify
which sockets speak Varlink. With user.* xattrs on sockets a service
can label its socket with the IPC protocol it speaks (e.g.,
user.varlink=1) and an eBPF program can then selectively capture
traffic on those sockets. Enumerating bound sockets via netlink combined
with these xattr labels gives a way to discover all Varlink IPC
entrypoints for debugging and introspection.
Similarly, systemd-journald wants to use xattrs on the /dev/log socket
for protocol negotiation to indicate whether RFC 5424 structured syslog
is supported or whether only the legacy RFC 3164 format should be used.
In containers these labels are particularly useful as high-privilege or
more complicated solutions for socket identification aren't available.
The series comes with comprehensive selftests covering path-based
AF_UNIX sockets, sockfs socket operations, per-inode limit enforcement,
and xattr operations across multiple address families (AF_INET,
AF_INET6, AF_NETLINK, AF_PACKET).
Christian
Signed-off-by: Christian Brauner <brauner@kernel.org>
---
Christian Brauner (14):
xattr: add rcu_head and rhash_head to struct simple_xattr
xattr: add rhashtable-based simple_xattr infrastructure
shmem: adapt to rhashtable-based simple_xattrs with lazy allocation
kernfs: adapt to rhashtable-based simple_xattrs with lazy allocation
pidfs: adapt to rhashtable-based simple_xattrs
xattr: remove rbtree-based simple_xattr infrastructure
xattr: add xattr_permission_error()
xattr: switch xattr_permission() to switch statement
xattr: move user limits for xattrs to generic infra
xattr,net: support limited amount of extended attributes on sockfs sockets
xattr: support extended attributes on sockets
selftests/xattr: path-based AF_UNIX socket xattr tests
selftests/xattr: sockfs socket xattr tests
selftests/xattr: test xattrs on various socket families
fs/kernfs/dir.c | 15 +-
fs/kernfs/inode.c | 99 +----
fs/kernfs/kernfs-internal.h | 5 +-
fs/pidfs.c | 65 +--
fs/xattr.c | 423 +++++++++++++------
include/linux/kernfs.h | 2 -
include/linux/shmem_fs.h | 2 +-
include/linux/xattr.h | 47 ++-
mm/shmem.c | 46 +-
net/socket.c | 119 ++++--
.../testing/selftests/filesystems/xattr/.gitignore | 3 +
tools/testing/selftests/filesystems/xattr/Makefile | 6 +
.../filesystems/xattr/xattr_socket_test.c | 470 +++++++++++++++++++++
.../filesystems/xattr/xattr_socket_types_test.c | 177 ++++++++
.../filesystems/xattr/xattr_sockfs_test.c | 363 ++++++++++++++++
15 files changed, 1547 insertions(+), 295 deletions(-)
---
base-commit: 72c395024dac5e215136cbff793455f065603b06
change-id: 20260211-work-xattr-socket-c85f4d3b8847
| null | null | null | [PATCH 00/14] xattr: rework simple xattrs and support user.*
xattrs on sockets | On Mon 16-02-26 14:32:10, Christian Brauner wrote:
Looks good. Feel free to add:
Reviewed-by: Jan Kara <jack@suse.cz>
Honza
--
Jan Kara <jack@suse.com>
SUSE Labs, CR | {
"author": "Jan Kara <jack@suse.cz>",
"date": "Fri, 27 Feb 2026 16:32:22 +0100",
"is_openbsd": false,
"thread_id": "yqxanlt3h4h2dwtpzgywsrzozdry3oe3c4yg22x6wqm2grntbu@pazi2ufzrwfv.mbox.gz"
} |
lkml_critique | lkml | The GEM MAC provides four read-only, clear-on-read LPI statistics
registers at offsets 0x270-0x27c:
GEM_RXLPI (0x270): RX LPI transition count (16-bit)
GEM_RXLPITIME (0x274): cumulative RX LPI time (24-bit)
GEM_TXLPI (0x278): TX LPI transition count (16-bit)
GEM_TXLPITIME (0x27c): cumulative TX LPI time (24-bit)
Add register offset definitions, extend struct gem_stats with
corresponding u64 software accumulators, and register the four
counters in gem_statistics[] so they appear in ethtool -S output.
Because the hardware counters clear on read, the existing
macb_update_stats() path accumulates them into the u64 fields on
every stats poll, preventing loss between userspace reads.
These registers are present on SAMA5D2, SAME70, PIC32CZ, and RP1
variants of the Cadence GEM IP and have been confirmed on RP1 via
devmem reads.
Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb.h | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..19aa98d01c8c 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -170,6 +170,10 @@
#define GEM_PCSANNPTX 0x021c /* PCS AN Next Page TX */
#define GEM_PCSANNPLP 0x0220 /* PCS AN Next Page LP */
#define GEM_PCSANEXTSTS 0x023c /* PCS AN Extended Status */
+#define GEM_RXLPI 0x0270 /* RX LPI Transitions */
+#define GEM_RXLPITIME 0x0274 /* RX LPI Time */
+#define GEM_TXLPI 0x0278 /* TX LPI Transitions */
+#define GEM_TXLPITIME 0x027c /* TX LPI Time */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -1043,6 +1047,10 @@ struct gem_stats {
u64 rx_ip_header_checksum_errors;
u64 rx_tcp_checksum_errors;
u64 rx_udp_checksum_errors;
+ u64 rx_lpi_transitions;
+ u64 rx_lpi_time;
+ u64 tx_lpi_transitions;
+ u64 tx_lpi_time;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1142,6 +1150,10 @@ static const struct gem_statistic gem_statistics[] = {
GEM_BIT(NDS_RXERR)),
GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE(RXLPI, "rx_lpi_transitions"),
+ GEM_STAT_TITLE(RXLPITIME, "rx_lpi_time"),
+ GEM_STAT_TITLE(TXLPI, "tx_lpi_transitions"),
+ GEM_STAT_TITLE(TXLPITIME, "tx_lpi_time"),
};
#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
--
2.51.0
| null | null | null | [PATCH net-next v5 1/5] net: cadence: macb: add EEE LPI statistics counters | Set MACB_CAPS_EEE for the Raspberry Pi 5 RP1 southbridge
(Cadence GEM_GXL rev 0x00070109 paired with BCM54213PE PHY).
EEE has been verified on RP1 hardware: the LPI counter registers
at 0x270-0x27c return valid data, the TXLPIEN bit in NCR (bit 19)
controls LPI transmission correctly, and ethtool --show-eee reports
the negotiated state after link-up.
Other GEM variants that share the same LPI register layout (SAMA5D2,
SAME70, PIC32CZ) can be enabled by adding MACB_CAPS_EEE to their
respective config entries once tested.
Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb_main.c | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3e724417d444..0196a13c0688 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5529,7 +5529,8 @@ static const struct macb_config eyeq5_config = {
static const struct macb_config raspberrypi_rp1_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_GEM_HAS_PTP |
+ MACB_CAPS_EEE,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
--
2.51.0 | {
"author": "Nicolai Buchwitz <nb@tipi-net.de>",
"date": "Fri, 27 Feb 2026 16:06:09 +0100",
"is_openbsd": false,
"thread_id": "20260227150610.242215-2-nb@tipi-net.de.mbox.gz"
} |
lkml_critique | lkml | The GEM MAC provides four read-only, clear-on-read LPI statistics
registers at offsets 0x270-0x27c:
GEM_RXLPI (0x270): RX LPI transition count (16-bit)
GEM_RXLPITIME (0x274): cumulative RX LPI time (24-bit)
GEM_TXLPI (0x278): TX LPI transition count (16-bit)
GEM_TXLPITIME (0x27c): cumulative TX LPI time (24-bit)
Add register offset definitions, extend struct gem_stats with
corresponding u64 software accumulators, and register the four
counters in gem_statistics[] so they appear in ethtool -S output.
Because the hardware counters clear on read, the existing
macb_update_stats() path accumulates them into the u64 fields on
every stats poll, preventing loss between userspace reads.
These registers are present on SAMA5D2, SAME70, PIC32CZ, and RP1
variants of the Cadence GEM IP and have been confirmed on RP1 via
devmem reads.
Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb.h | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..19aa98d01c8c 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -170,6 +170,10 @@
#define GEM_PCSANNPTX 0x021c /* PCS AN Next Page TX */
#define GEM_PCSANNPLP 0x0220 /* PCS AN Next Page LP */
#define GEM_PCSANEXTSTS 0x023c /* PCS AN Extended Status */
+#define GEM_RXLPI 0x0270 /* RX LPI Transitions */
+#define GEM_RXLPITIME 0x0274 /* RX LPI Time */
+#define GEM_TXLPI 0x0278 /* TX LPI Transitions */
+#define GEM_TXLPITIME 0x027c /* TX LPI Time */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -1043,6 +1047,10 @@ struct gem_stats {
u64 rx_ip_header_checksum_errors;
u64 rx_tcp_checksum_errors;
u64 rx_udp_checksum_errors;
+ u64 rx_lpi_transitions;
+ u64 rx_lpi_time;
+ u64 tx_lpi_transitions;
+ u64 tx_lpi_time;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1142,6 +1150,10 @@ static const struct gem_statistic gem_statistics[] = {
GEM_BIT(NDS_RXERR)),
GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE(RXLPI, "rx_lpi_transitions"),
+ GEM_STAT_TITLE(RXLPITIME, "rx_lpi_time"),
+ GEM_STAT_TITLE(TXLPI, "tx_lpi_transitions"),
+ GEM_STAT_TITLE(TXLPITIME, "tx_lpi_time"),
};
#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
--
2.51.0
| null | null | null | [PATCH net-next v5 1/5] net: cadence: macb: add EEE LPI statistics counters | Add Energy Efficient Ethernet (IEEE 802.3az) support to the Cadence GEM
(macb) driver using phylink's managed EEE framework. The GEM MAC has
hardware LPI registers but no built-in idle timer, so the driver
implements software-managed TX LPI using a delayed_work timer while
delegating EEE negotiation and ethtool state to phylink.
Changes from v4:
- Removed redundant MACB_CAPS_EEE guards from macb_get_eee/set_eee;
phylink already returns -EOPNOTSUPP when lpi_capabilities and
lpi_interfaces are not populated. Based on feedback from Russell King.
- Added patch 5 enabling EEE for Mobileye EyeQ5, tested by Théo Lebrun
using a hardware loopback.
Changes from v3:
- Dropped the register-definitions-only patch; LPI counter offsets
(GEM_RXLPI/RXLPITIME/TXLPI/TXLPITIME) now land in the statistics
patch, and TXLPIEN + MACB_CAPS_EEE are introduced alongside the TX
LPI implementation where they are first used. Series is now 4 patches.
- Add Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com> to all patches.
- Split chained assignment in macb_tx_lpi_set() (suggested by checkpatch).
Changes from v2:
- macb_tx_lpi_set() now returns bool indicating whether the register
value actually changed, avoiding redundant writes.
- Removed tx_lpi_enabled field from struct macb; LPI state is tracked
entirely within the spinlock-protected register read/modify/write.
- macb_tx_lpi_wake() uses the return value of macb_tx_lpi_set() to
skip the cancel/udelay when TXLPIEN was already clear.
All changes based on feedback from Russell King.
Changes from v1:
- Rewrote to use phylink managed EEE (mac_enable_tx_lpi /
mac_disable_tx_lpi callbacks) instead of the obsolete
phy_init_eee() approach, as recommended by Russell King.
- ethtool get_eee/set_eee are now pure phylink passthroughs.
- Removed all manual EEE state tracking from mac_link_up/down;
phylink handles the lifecycle.
The series is structured as follows:
1. LPI statistics: Expose the four hardware EEE counters (RX/TX LPI
transitions and time) through ethtool -S, accumulated in software
since they are clear-on-read. Adds register offset definitions
GEM_RXLPI/RXLPITIME/TXLPI/TXLPITIME (0x270-0x27c).
2. TX LPI engine: Introduces GEM_TXLPIEN (NCR bit 19) and
MACB_CAPS_EEE alongside the implementation that uses them.
phylink mac_enable_tx_lpi / mac_disable_tx_lpi callbacks with a
delayed_work-based idle timer. LPI entry is deferred 1 second
after link-up per IEEE 802.3az. Wake before transmit with a
conservative 50us PHY wake delay (IEEE 802.3az Tw_sys_tx).
3. ethtool EEE ops: get_eee/set_eee delegating to phylink for PHY
negotiation and timer management.
4. RP1 enablement: Set MACB_CAPS_EEE for the Raspberry Pi 5's RP1
southbridge (Cadence GEM_GXL rev 0x00070109 + BCM54213PE PHY).
5. EyeQ5 enablement: Set MACB_CAPS_EEE for the Mobileye EyeQ5 GEM
instance, verified with a hardware loopback by Théo Lebrun.
Tested on Raspberry Pi 5 (1000BASE-T, BCM54213PE PHY, 250ms LPI timer):
iperf3 throughput (no regression):
TCP TX: 937.8 Mbit/s (EEE on) vs 937.0 Mbit/s (EEE off)
TCP RX: 936.5 Mbit/s both
Latency (ping RTT, small expected increase from LPI wake):
1s interval: 0.273 ms (EEE on) vs 0.181 ms (EEE off)
10ms interval: 0.206 ms (EEE on) vs 0.168 ms (EEE off)
flood ping: 0.200 ms (EEE on) vs 0.156 ms (EEE off)
LPI counters (ethtool -S, 1s-interval ping, EEE on):
tx_lpi_transitions: 112
tx_lpi_time: 15574651
Zero packet loss across all tests. Also verified with
ethtool --show-eee / --set-eee and cable unplug/replug cycling.
Nicolai Buchwitz (5):
net: cadence: macb: add EEE LPI statistics counters
net: cadence: macb: implement EEE TX LPI support
net: cadence: macb: add ethtool EEE support
net: cadence: macb: enable EEE for Raspberry Pi RP1
net: cadence: macb: enable EEE for Mobileye EyeQ5
drivers/net/ethernet/cadence/macb.h | 20 +++++
drivers/net/ethernet/cadence/macb_main.c | 133 ++++++++++++++++++++++++++++++-
2 files changed, 151 insertions(+), 2 deletions(-)
--
2.51.0 | {
"author": "Nicolai Buchwitz <nb@tipi-net.de>",
"date": "Fri, 27 Feb 2026 16:06:05 +0100",
"is_openbsd": false,
"thread_id": "20260227150610.242215-2-nb@tipi-net.de.mbox.gz"
} |
lkml_critique | lkml | The GEM MAC provides four read-only, clear-on-read LPI statistics
registers at offsets 0x270-0x27c:
GEM_RXLPI (0x270): RX LPI transition count (16-bit)
GEM_RXLPITIME (0x274): cumulative RX LPI time (24-bit)
GEM_TXLPI (0x278): TX LPI transition count (16-bit)
GEM_TXLPITIME (0x27c): cumulative TX LPI time (24-bit)
Add register offset definitions, extend struct gem_stats with
corresponding u64 software accumulators, and register the four
counters in gem_statistics[] so they appear in ethtool -S output.
Because the hardware counters clear on read, the existing
macb_update_stats() path accumulates them into the u64 fields on
every stats poll, preventing loss between userspace reads.
These registers are present on SAMA5D2, SAME70, PIC32CZ, and RP1
variants of the Cadence GEM IP and have been confirmed on RP1 via
devmem reads.
Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb.h | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..19aa98d01c8c 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -170,6 +170,10 @@
#define GEM_PCSANNPTX 0x021c /* PCS AN Next Page TX */
#define GEM_PCSANNPLP 0x0220 /* PCS AN Next Page LP */
#define GEM_PCSANEXTSTS 0x023c /* PCS AN Extended Status */
+#define GEM_RXLPI 0x0270 /* RX LPI Transitions */
+#define GEM_RXLPITIME 0x0274 /* RX LPI Time */
+#define GEM_TXLPI 0x0278 /* TX LPI Transitions */
+#define GEM_TXLPITIME 0x027c /* TX LPI Time */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -1043,6 +1047,10 @@ struct gem_stats {
u64 rx_ip_header_checksum_errors;
u64 rx_tcp_checksum_errors;
u64 rx_udp_checksum_errors;
+ u64 rx_lpi_transitions;
+ u64 rx_lpi_time;
+ u64 tx_lpi_transitions;
+ u64 tx_lpi_time;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1142,6 +1150,10 @@ static const struct gem_statistic gem_statistics[] = {
GEM_BIT(NDS_RXERR)),
GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE(RXLPI, "rx_lpi_transitions"),
+ GEM_STAT_TITLE(RXLPITIME, "rx_lpi_time"),
+ GEM_STAT_TITLE(TXLPI, "tx_lpi_transitions"),
+ GEM_STAT_TITLE(TXLPITIME, "tx_lpi_time"),
};
#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
--
2.51.0
| null | null | null | [PATCH net-next v5 1/5] net: cadence: macb: add EEE LPI statistics counters | Implement get_eee and set_eee ethtool ops for GEM as simple passthroughs
to phylink_ethtool_get_eee() and phylink_ethtool_set_eee().
No MACB_CAPS_EEE guard is needed: phylink returns -EOPNOTSUPP from both
ops when mac_supports_eee is false, which is the case when
lpi_capabilities and lpi_interfaces are not populated. Those fields are
only set when MACB_CAPS_EEE is present (previous patch), so phylink
already handles the unsupported case correctly.
Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb_main.c | 16 ++++++++++++++++
1 file changed, 16 insertions(+)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index c23485f049d3..3e724417d444 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4050,6 +4050,20 @@ static const struct ethtool_ops macb_ethtool_ops = {
.set_ringparam = macb_set_ringparam,
};
+static int macb_get_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ return phylink_ethtool_get_eee(bp->phylink, eee);
+}
+
+static int macb_set_eee(struct net_device *dev, struct ethtool_keee *eee)
+{
+ struct macb *bp = netdev_priv(dev);
+
+ return phylink_ethtool_set_eee(bp->phylink, eee);
+}
+
static const struct ethtool_ops gem_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
@@ -4072,6 +4086,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
.set_rxnfc = gem_set_rxnfc,
.get_rx_ring_count = gem_get_rx_ring_count,
.nway_reset = phy_ethtool_nway_reset,
+ .get_eee = macb_get_eee,
+ .set_eee = macb_set_eee,
};
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
--
2.51.0 | {
"author": "Nicolai Buchwitz <nb@tipi-net.de>",
"date": "Fri, 27 Feb 2026 16:06:08 +0100",
"is_openbsd": false,
"thread_id": "20260227150610.242215-2-nb@tipi-net.de.mbox.gz"
} |
lkml_critique | lkml | The GEM MAC provides four read-only, clear-on-read LPI statistics
registers at offsets 0x270-0x27c:
GEM_RXLPI (0x270): RX LPI transition count (16-bit)
GEM_RXLPITIME (0x274): cumulative RX LPI time (24-bit)
GEM_TXLPI (0x278): TX LPI transition count (16-bit)
GEM_TXLPITIME (0x27c): cumulative TX LPI time (24-bit)
Add register offset definitions, extend struct gem_stats with
corresponding u64 software accumulators, and register the four
counters in gem_statistics[] so they appear in ethtool -S output.
Because the hardware counters clear on read, the existing
macb_update_stats() path accumulates them into the u64 fields on
every stats poll, preventing loss between userspace reads.
These registers are present on SAMA5D2, SAME70, PIC32CZ, and RP1
variants of the Cadence GEM IP and have been confirmed on RP1 via
devmem reads.
Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb.h | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..19aa98d01c8c 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -170,6 +170,10 @@
#define GEM_PCSANNPTX 0x021c /* PCS AN Next Page TX */
#define GEM_PCSANNPLP 0x0220 /* PCS AN Next Page LP */
#define GEM_PCSANEXTSTS 0x023c /* PCS AN Extended Status */
+#define GEM_RXLPI 0x0270 /* RX LPI Transitions */
+#define GEM_RXLPITIME 0x0274 /* RX LPI Time */
+#define GEM_TXLPI 0x0278 /* TX LPI Transitions */
+#define GEM_TXLPITIME 0x027c /* TX LPI Time */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -1043,6 +1047,10 @@ struct gem_stats {
u64 rx_ip_header_checksum_errors;
u64 rx_tcp_checksum_errors;
u64 rx_udp_checksum_errors;
+ u64 rx_lpi_transitions;
+ u64 rx_lpi_time;
+ u64 tx_lpi_transitions;
+ u64 tx_lpi_time;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1142,6 +1150,10 @@ static const struct gem_statistic gem_statistics[] = {
GEM_BIT(NDS_RXERR)),
GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE(RXLPI, "rx_lpi_transitions"),
+ GEM_STAT_TITLE(RXLPITIME, "rx_lpi_time"),
+ GEM_STAT_TITLE(TXLPI, "tx_lpi_transitions"),
+ GEM_STAT_TITLE(TXLPITIME, "tx_lpi_time"),
};
#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
--
2.51.0
| null | null | null | [PATCH net-next v5 1/5] net: cadence: macb: add EEE LPI statistics counters | The GEM MAC has hardware LPI registers (NCR bit 19: TXLPIEN) but no
built-in idle timer, so asserting TXLPIEN blocks all TX immediately
with no automatic wake. A software idle timer is required, as noted
in Microchip documentation (section 40.6.19): "It is best to use
firmware to control LPI."
Implement phylink managed EEE using the mac_enable_tx_lpi and
mac_disable_tx_lpi callbacks:
- macb_tx_lpi_set(): atomically sets or clears TXLPIEN under the
existing bp->lock spinlock; returns bool indicating whether the
register actually changed, avoiding redundant writes.
- macb_tx_lpi_work_fn(): delayed_work handler that enters LPI if all
TX queues are idle and EEE is still active.
- macb_tx_lpi_schedule(): arms the work timer using the LPI timer
value provided by phylink (default 250 ms). Called from
macb_tx_complete() after each TX drain so the idle countdown
restarts whenever the ring goes quiet.
- macb_tx_lpi_wake(): called from macb_start_xmit() before TSTART.
Clears TXLPIEN and applies a 50 us udelay for PHY wake (IEEE
802.3az Tw_sys_tx is 16.5 us for 1000BASE-T / 30 us for
100BASE-TX; GEM has no hardware enforcement). Only delays when
TXLPIEN was actually set, avoiding overhead on the common path.
The delay is placed after tx_head is advanced so the work_fn's
queue-idle check sees a non-empty ring and cannot race back into
LPI before the frame is transmitted.
- mac_enable_tx_lpi: stores the timer and sets eee_active, then
defers the first LPI entry by 1 second per IEEE 802.3az section
22.7a.
- mac_disable_tx_lpi: clears eee_active, cancels the work, and
deasserts TXLPIEN.
Populate phylink_config lpi_interfaces (MII, GMII, RGMII variants)
and lpi_capabilities (MAC_100FD | MAC_1000FD) so phylink can
negotiate EEE with the PHY and call the callbacks appropriately.
Set lpi_timer_default to 250000 us and eee_enabled_default to true.
Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb.h | 8 ++
drivers/net/ethernet/cadence/macb_main.c | 112 +++++++++++++++++++++++
2 files changed, 120 insertions(+)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 19aa98d01c8c..c69828b27dae 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -309,6 +309,8 @@
#define MACB_IRXFCS_SIZE 1
/* GEM specific NCR bitfields. */
+#define GEM_TXLPIEN_OFFSET 19
+#define GEM_TXLPIEN_SIZE 1
#define GEM_ENABLE_HS_MAC_OFFSET 31
#define GEM_ENABLE_HS_MAC_SIZE 1
@@ -783,6 +785,7 @@
#define MACB_CAPS_DMA_PTP BIT(22)
#define MACB_CAPS_RSC BIT(23)
#define MACB_CAPS_NO_LSO BIT(24)
+#define MACB_CAPS_EEE BIT(25)
/* LSO settings */
#define MACB_LSO_UFO_ENABLE 0x01
@@ -1369,6 +1372,11 @@ struct macb {
struct work_struct hresp_err_bh_work;
+ /* EEE / LPI state */
+ bool eee_active;
+ struct delayed_work tx_lpi_work;
+ u32 tx_lpi_timer;
+
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 02eab26fd98b..c23485f049d3 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -10,6 +10,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/crc32.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/firmware/xlnx-zynqmp.h>
@@ -621,6 +622,94 @@ static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
.pcs_config = macb_pcs_config,
};
+static bool macb_tx_lpi_set(struct macb *bp, bool enable)
+{
+ unsigned long flags;
+ u32 old, ncr;
+
+ spin_lock_irqsave(&bp->lock, flags);
+ ncr = macb_readl(bp, NCR);
+ old = ncr;
+ if (enable)
+ ncr |= GEM_BIT(TXLPIEN);
+ else
+ ncr &= ~GEM_BIT(TXLPIEN);
+ if (old != ncr)
+ macb_writel(bp, NCR, ncr);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return old != ncr;
+}
+
+static bool macb_tx_all_queues_idle(struct macb *bp)
+{
+ unsigned int q;
+
+ for (q = 0; q < bp->num_queues; q++) {
+ struct macb_queue *queue = &bp->queues[q];
+
+ if (queue->tx_head != queue->tx_tail)
+ return false;
+ }
+ return true;
+}
+
+static void macb_tx_lpi_work_fn(struct work_struct *work)
+{
+ struct macb *bp = container_of(work, struct macb, tx_lpi_work.work);
+
+ if (bp->eee_active && macb_tx_all_queues_idle(bp))
+ macb_tx_lpi_set(bp, true);
+}
+
+static void macb_tx_lpi_schedule(struct macb *bp)
+{
+ if (bp->eee_active)
+ mod_delayed_work(system_wq, &bp->tx_lpi_work,
+ usecs_to_jiffies(bp->tx_lpi_timer));
+}
+
+/* Wake from LPI before transmitting. The MAC must deassert TXLPIEN
+ * and wait for the PHY to exit LPI before any frame can be sent.
+ * IEEE 802.3az Tw_sys is ~17us for 1000BASE-T, ~30us for 100BASE-TX;
+ * we use a conservative 50us.
+ */
+static void macb_tx_lpi_wake(struct macb *bp)
+{
+ if (!macb_tx_lpi_set(bp, false))
+ return;
+
+ cancel_delayed_work(&bp->tx_lpi_work);
+ udelay(50);
+}
+
+static void macb_mac_disable_tx_lpi(struct phylink_config *config)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+
+ bp->eee_active = false;
+ cancel_delayed_work_sync(&bp->tx_lpi_work);
+ macb_tx_lpi_set(bp, false);
+}
+
+static int macb_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
+ bool tx_clk_stop)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct macb *bp = netdev_priv(ndev);
+
+ bp->tx_lpi_timer = timer;
+ bp->eee_active = true;
+
+ /* Defer initial LPI entry by 1 second after link-up per
+ * IEEE 802.3az section 22.7a.
+ */
+ mod_delayed_work(system_wq, &bp->tx_lpi_work, msecs_to_jiffies(1000));
+
+ return 0;
+}
+
static void macb_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -769,6 +858,8 @@ static const struct phylink_mac_ops macb_phylink_ops = {
.mac_config = macb_mac_config,
.mac_link_down = macb_mac_link_down,
.mac_link_up = macb_mac_link_up,
+ .mac_disable_tx_lpi = macb_mac_disable_tx_lpi,
+ .mac_enable_tx_lpi = macb_mac_enable_tx_lpi,
};
static bool macb_phy_handle_exists(struct device_node *dn)
@@ -864,6 +955,18 @@ static int macb_mii_probe(struct net_device *dev)
}
}
+ /* Configure EEE LPI if supported */
+ if (bp->caps & MACB_CAPS_EEE) {
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ bp->phylink_config.lpi_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ bp->phylink_config.lpi_interfaces);
+ phy_interface_set_rgmii(bp->phylink_config.lpi_interfaces);
+ bp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
+ bp->phylink_config.lpi_timer_default = 250000;
+ bp->phylink_config.eee_enabled_default = true;
+ }
+
bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
bp->phy_interface, &macb_phylink_ops);
if (IS_ERR(bp->phylink)) {
@@ -1260,6 +1363,9 @@ static int macb_tx_complete(struct macb_queue *queue, int budget)
netif_wake_subqueue(bp->dev, queue_index);
spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
+ if (packets)
+ macb_tx_lpi_schedule(bp);
+
return packets;
}
@@ -2365,6 +2471,8 @@ static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
skb->len);
+ macb_tx_lpi_wake(bp);
+
spin_lock(&bp->lock);
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
spin_unlock(&bp->lock);
@@ -3026,6 +3134,8 @@ static int macb_close(struct net_device *dev)
netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
}
+ cancel_delayed_work_sync(&bp->tx_lpi_work);
+
phylink_stop(bp->phylink);
phylink_disconnect_phy(bp->phylink);
@@ -5633,6 +5743,7 @@ static int macb_probe(struct platform_device *pdev)
}
INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
+ INIT_DELAYED_WORK(&bp->tx_lpi_work, macb_tx_lpi_work_fn);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
@@ -5676,6 +5787,7 @@ static void macb_remove(struct platform_device *pdev)
mdiobus_free(bp->mii_bus);
device_set_wakeup_enable(&bp->pdev->dev, 0);
+ cancel_delayed_work_sync(&bp->tx_lpi_work);
cancel_work_sync(&bp->hresp_err_bh_work);
pm_runtime_disable(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
--
2.51.0 | {
"author": "Nicolai Buchwitz <nb@tipi-net.de>",
"date": "Fri, 27 Feb 2026 16:06:07 +0100",
"is_openbsd": false,
"thread_id": "20260227150610.242215-2-nb@tipi-net.de.mbox.gz"
} |
lkml_critique | lkml | The GEM MAC provides four read-only, clear-on-read LPI statistics
registers at offsets 0x270-0x27c:
GEM_RXLPI (0x270): RX LPI transition count (16-bit)
GEM_RXLPITIME (0x274): cumulative RX LPI time (24-bit)
GEM_TXLPI (0x278): TX LPI transition count (16-bit)
GEM_TXLPITIME (0x27c): cumulative TX LPI time (24-bit)
Add register offset definitions, extend struct gem_stats with
corresponding u64 software accumulators, and register the four
counters in gem_statistics[] so they appear in ethtool -S output.
Because the hardware counters clear on read, the existing
macb_update_stats() path accumulates them into the u64 fields on
every stats poll, preventing loss between userspace reads.
These registers are present on SAMA5D2, SAME70, PIC32CZ, and RP1
variants of the Cadence GEM IP and have been confirmed on RP1 via
devmem reads.
Reviewed-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb.h | 12 ++++++++++++
1 file changed, 12 insertions(+)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 87414a2ddf6e..19aa98d01c8c 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -170,6 +170,10 @@
#define GEM_PCSANNPTX 0x021c /* PCS AN Next Page TX */
#define GEM_PCSANNPLP 0x0220 /* PCS AN Next Page LP */
#define GEM_PCSANEXTSTS 0x023c /* PCS AN Extended Status */
+#define GEM_RXLPI 0x0270 /* RX LPI Transitions */
+#define GEM_RXLPITIME 0x0274 /* RX LPI Time */
+#define GEM_TXLPI 0x0278 /* TX LPI Transitions */
+#define GEM_TXLPITIME 0x027c /* TX LPI Time */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -1043,6 +1047,10 @@ struct gem_stats {
u64 rx_ip_header_checksum_errors;
u64 rx_tcp_checksum_errors;
u64 rx_udp_checksum_errors;
+ u64 rx_lpi_transitions;
+ u64 rx_lpi_time;
+ u64 tx_lpi_transitions;
+ u64 tx_lpi_time;
};
/* Describes the name and offset of an individual statistic register, as
@@ -1142,6 +1150,10 @@ static const struct gem_statistic gem_statistics[] = {
GEM_BIT(NDS_RXERR)),
GEM_STAT_TITLE_BITS(RXUDPCCNT, "rx_udp_checksum_errors",
GEM_BIT(NDS_RXERR)),
+ GEM_STAT_TITLE(RXLPI, "rx_lpi_transitions"),
+ GEM_STAT_TITLE(RXLPITIME, "rx_lpi_time"),
+ GEM_STAT_TITLE(TXLPI, "tx_lpi_transitions"),
+ GEM_STAT_TITLE(TXLPITIME, "tx_lpi_time"),
};
#define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)
--
2.51.0
| null | null | null | [PATCH net-next v5 1/5] net: cadence: macb: add EEE LPI statistics counters | Set MACB_CAPS_EEE for the Mobileye EyeQ5 GEM instance. EEE has been
verified on EyeQ5 hardware using a loopback setup with ethtool
--show-eee confirming EEE active on both ends at 100baseT/Full and
1000baseT/Full.
Tested-by: Théo Lebrun <theo.lebrun@bootlin.com>
Signed-off-by: Nicolai Buchwitz <nb@tipi-net.de>
---
drivers/net/ethernet/cadence/macb_main.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 0196a13c0688..58a265ee9f9e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -5518,7 +5518,7 @@ static const struct macb_config versal_config = {
static const struct macb_config eyeq5_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_QUEUE_DISABLE |
- MACB_CAPS_NO_LSO,
+ MACB_CAPS_NO_LSO | MACB_CAPS_EEE,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = eyeq5_init,
--
2.51.0 | {
"author": "Nicolai Buchwitz <nb@tipi-net.de>",
"date": "Fri, 27 Feb 2026 16:06:10 +0100",
"is_openbsd": false,
"thread_id": "20260227150610.242215-2-nb@tipi-net.de.mbox.gz"
} |
lkml_critique | lkml | When large folio is enabled and the initial folio offset exceeds
PAGE_SIZE, e.g. the position resides in the second page of a large
folio, after the folio copying the offset (in the page) won't be updated
to 0 even though the expected range is successfully copied until the end
of the folio. In this case fuse_fill_write_pages() exits prematurelly
before the request has reached the max_write/max_pages limit.
Fix this by eliminating page offset entirely and use folio offset
instead.
Fixes: d60a6015e1a2 ("fuse: support large folios for writethrough writes")
Reviewed-by: Horst Birthelmer <hbirthelmer@ddn.com>
Reviewed-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Jingbo Xu <jefflexu@linux.alibaba.com>
---
changes since v2:
- drop stable CC tag; add Reviewed-by tag by Joanne
v1: https://lore.kernel.org/all/20260114055615.17903-1-jefflexu@linux.alibaba.com/
v2: https://lore.kernel.org/all/20260114124514.62998-1-jefflexu@linux.alibaba.com/
---
fs/fuse/file.c | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 625d236b881b..6aafb32338b6 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1272,7 +1272,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
{
struct fuse_args_pages *ap = &ia->ap;
struct fuse_conn *fc = get_fuse_conn(mapping->host);
- unsigned offset = pos & (PAGE_SIZE - 1);
size_t count = 0;
unsigned int num;
int err = 0;
@@ -1299,7 +1298,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
if (mapping_writably_mapped(mapping))
flush_dcache_folio(folio);
- folio_offset = ((index - folio->index) << PAGE_SHIFT) + offset;
+ folio_offset = offset_in_folio(folio, pos);
bytes = min(folio_size(folio) - folio_offset, num);
tmp = copy_folio_from_iter_atomic(folio, folio_offset, bytes, ii);
@@ -1329,9 +1328,6 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
count += tmp;
pos += tmp;
num -= tmp;
- offset += tmp;
- if (offset == folio_size(folio))
- offset = 0;
/* If we copied full folio, mark it uptodate */
if (tmp == folio_size(folio))
@@ -1343,7 +1339,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia,
ia->write.folio_locked = true;
break;
}
- if (!fc->big_writes || offset != 0)
+ if (!fc->big_writes)
+ break;
+ if (folio_offset + tmp != folio_size(folio))
break;
}
--
2.19.1.6.gb485710b
| null | null | null | [PATCH v3] fuse: fix premature writetrhough request for large folio | On Thu, 15 Jan 2026 at 03:36, Jingbo Xu <jefflexu@linux.alibaba.com> wrote:
Applied, thanks.
Miklos | {
"author": "Miklos Szeredi <miklos@szeredi.hu>",
"date": "Fri, 27 Feb 2026 16:23:35 +0100",
"is_openbsd": false,
"thread_id": "CAJfpegts6NiEokGSC7t+bXKHWHadYhxjYZDZ0+PabNSTnWVDLg@mail.gmail.com.mbox.gz"
} |
lkml_critique | lkml | The series trys to add USB2 PHY support for SpacemiT K3 SoC,
while patch [2/3] implement a disconnect function which is
needed during next connection.
No DTS part has been inclueded in this series, instead I plan
to submit while adding USB host support.
Signed-off-by: Yixun Lan <dlan@kernel.org>
---
Changes in v2:
- collect ACK
- add Fixes tag
- implement separate phy_ops
- Link to v1: https://lore.kernel.org/r/20260212-11-k3-usb2-phy-v1-0-43578592405d@kernel.org
---
Yixun Lan (3):
dt-bindings: phy: spacemit: k3: add USB2 PHY support
phy: k1-usb: add disconnect function support
phy: k1-usb: k3: add USB2 PHY support
.../devicetree/bindings/phy/spacemit,usb2-phy.yaml | 6 ++-
drivers/phy/spacemit/phy-k1-usb2.c | 44 ++++++++++++++++++++--
2 files changed, 45 insertions(+), 5 deletions(-)
---
base-commit: dd39930f3d9c1d74a40b79d368e1f3d1555e919c
change-id: 20260124-11-k3-usb2-phy-c4630b990b1f
Best regards,
--
Yixun Lan <dlan@kernel.org>
| null | null | null | [PATCH v2 0/3] phy: spacemit: Add USB2 PHY support for K3 SoC | Introduce a compatible string for the USB2 PHY in SpacemiT K3 SoC. The IP
of USB2 PHY mostly shares the same functionalities with K1 SoC, while has
some register layout changes.
Acked-by: Krzysztof Kozlowski <krzysztof.kozlowski@oss.qualcomm.com>
Signed-off-by: Yixun Lan <dlan@kernel.org>
---
Documentation/devicetree/bindings/phy/spacemit,usb2-phy.yaml | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/Documentation/devicetree/bindings/phy/spacemit,usb2-phy.yaml b/Documentation/devicetree/bindings/phy/spacemit,usb2-phy.yaml
index 43eaca90d88c..18025e5f60d6 100644
--- a/Documentation/devicetree/bindings/phy/spacemit,usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/spacemit,usb2-phy.yaml
@@ -4,14 +4,16 @@
$id: http://devicetree.org/schemas/phy/spacemit,usb2-phy.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: SpacemiT K1 SoC USB 2.0 PHY
+title: SpacemiT K1/K3 SoC USB 2.0 PHY
maintainers:
- Ze Huang <huang.ze@linux.dev>
properties:
compatible:
- const: spacemit,k1-usb2-phy
+ enum:
+ - spacemit,k1-usb2-phy
+ - spacemit,k3-usb2-phy
reg:
maxItems: 1
--
2.52.0 | {
"author": "Yixun Lan <dlan@kernel.org>",
"date": "Sat, 14 Feb 2026 20:29:14 +0800",
"is_openbsd": false,
"thread_id": "aaGxX_3FXjM1LT6y@vaman.mbox.gz"
} |
lkml_critique | lkml | The series trys to add USB2 PHY support for SpacemiT K3 SoC,
while patch [2/3] implement a disconnect function which is
needed during next connection.
No DTS part has been inclueded in this series, instead I plan
to submit while adding USB host support.
Signed-off-by: Yixun Lan <dlan@kernel.org>
---
Changes in v2:
- collect ACK
- add Fixes tag
- implement separate phy_ops
- Link to v1: https://lore.kernel.org/r/20260212-11-k3-usb2-phy-v1-0-43578592405d@kernel.org
---
Yixun Lan (3):
dt-bindings: phy: spacemit: k3: add USB2 PHY support
phy: k1-usb: add disconnect function support
phy: k1-usb: k3: add USB2 PHY support
.../devicetree/bindings/phy/spacemit,usb2-phy.yaml | 6 ++-
drivers/phy/spacemit/phy-k1-usb2.c | 44 ++++++++++++++++++++--
2 files changed, 45 insertions(+), 5 deletions(-)
---
base-commit: dd39930f3d9c1d74a40b79d368e1f3d1555e919c
change-id: 20260124-11-k3-usb2-phy-c4630b990b1f
Best regards,
--
Yixun Lan <dlan@kernel.org>
| null | null | null | [PATCH v2 0/3] phy: spacemit: Add USB2 PHY support for K3 SoC | A disconnect status BIT of USB2 PHY need to be cleared, otherwise
it will fail to work properly during next connection when devices
connect to roothub directly.
Fixes: fe4bc1a08638 ("phy: spacemit: support K1 USB2.0 PHY controller")
Signed-off-by: Yixun Lan <dlan@kernel.org>
---
drivers/phy/spacemit/phy-k1-usb2.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/drivers/phy/spacemit/phy-k1-usb2.c b/drivers/phy/spacemit/phy-k1-usb2.c
index 342061380012..959bf79c7a72 100644
--- a/drivers/phy/spacemit/phy-k1-usb2.c
+++ b/drivers/phy/spacemit/phy-k1-usb2.c
@@ -48,6 +48,9 @@
#define PHY_CLK_HSTXP_EN BIT(3) /* clock hstxp enable */
#define PHY_HSTXP_MODE BIT(4) /* 0: force en_txp to be 1; 1: no force */
+#define PHY_K1_HS_HOST_DISC 0x40
+#define PHY_K1_HS_HOST_DISC_CLR BIT(0)
+
#define PHY_PLL_DIV_CFG 0x98
#define PHY_FDIV_FRACT_8_15 GENMASK(7, 0)
#define PHY_FDIV_FRACT_16_19 GENMASK(11, 8)
@@ -142,9 +145,20 @@ static int spacemit_usb2phy_exit(struct phy *phy)
return 0;
}
+static int spacemit_usb2phy_disconnect(struct phy *phy, int port)
+{
+ struct spacemit_usb2phy *sphy = phy_get_drvdata(phy);
+
+ regmap_update_bits(sphy->regmap_base, PHY_K1_HS_HOST_DISC,
+ PHY_K1_HS_HOST_DISC_CLR, PHY_K1_HS_HOST_DISC_CLR);
+
+ return 0;
+}
+
static const struct phy_ops spacemit_usb2phy_ops = {
.init = spacemit_usb2phy_init,
.exit = spacemit_usb2phy_exit,
+ .disconnect = spacemit_usb2phy_disconnect,
.owner = THIS_MODULE,
};
--
2.52.0 | {
"author": "Yixun Lan <dlan@kernel.org>",
"date": "Sat, 14 Feb 2026 20:29:15 +0800",
"is_openbsd": false,
"thread_id": "aaGxX_3FXjM1LT6y@vaman.mbox.gz"
} |
lkml_critique | lkml | The series trys to add USB2 PHY support for SpacemiT K3 SoC,
while patch [2/3] implement a disconnect function which is
needed during next connection.
No DTS part has been inclueded in this series, instead I plan
to submit while adding USB host support.
Signed-off-by: Yixun Lan <dlan@kernel.org>
---
Changes in v2:
- collect ACK
- add Fixes tag
- implement separate phy_ops
- Link to v1: https://lore.kernel.org/r/20260212-11-k3-usb2-phy-v1-0-43578592405d@kernel.org
---
Yixun Lan (3):
dt-bindings: phy: spacemit: k3: add USB2 PHY support
phy: k1-usb: add disconnect function support
phy: k1-usb: k3: add USB2 PHY support
.../devicetree/bindings/phy/spacemit,usb2-phy.yaml | 6 ++-
drivers/phy/spacemit/phy-k1-usb2.c | 44 ++++++++++++++++++++--
2 files changed, 45 insertions(+), 5 deletions(-)
---
base-commit: dd39930f3d9c1d74a40b79d368e1f3d1555e919c
change-id: 20260124-11-k3-usb2-phy-c4630b990b1f
Best regards,
--
Yixun Lan <dlan@kernel.org>
| null | null | null | [PATCH v2 0/3] phy: spacemit: Add USB2 PHY support for K3 SoC | Add USB2 PHY support for SpacemiT K3 SoC.
Register layout of handling USB disconnect operation has been changed,
So introducing a platform data to distinguish the different SoCs.
Signed-off-by: Yixun Lan <dlan@kernel.org>
---
drivers/phy/spacemit/phy-k1-usb2.c | 34 +++++++++++++++++++++++++++++-----
1 file changed, 29 insertions(+), 5 deletions(-)
diff --git a/drivers/phy/spacemit/phy-k1-usb2.c b/drivers/phy/spacemit/phy-k1-usb2.c
index 959bf79c7a72..b4ba97481ddd 100644
--- a/drivers/phy/spacemit/phy-k1-usb2.c
+++ b/drivers/phy/spacemit/phy-k1-usb2.c
@@ -51,6 +51,9 @@
#define PHY_K1_HS_HOST_DISC 0x40
#define PHY_K1_HS_HOST_DISC_CLR BIT(0)
+#define PHY_K3_HS_HOST_DISC 0x20
+#define PHY_K3_HS_HOST_DISC_CLR BIT(8)
+
#define PHY_PLL_DIV_CFG 0x98
#define PHY_FDIV_FRACT_8_15 GENMASK(7, 0)
#define PHY_FDIV_FRACT_16_19 GENMASK(11, 8)
@@ -145,7 +148,7 @@ static int spacemit_usb2phy_exit(struct phy *phy)
return 0;
}
-static int spacemit_usb2phy_disconnect(struct phy *phy, int port)
+static int spacemit_k1_usb2phy_disconnect(struct phy *phy, int port)
{
struct spacemit_usb2phy *sphy = phy_get_drvdata(phy);
@@ -155,10 +158,27 @@ static int spacemit_usb2phy_disconnect(struct phy *phy, int port)
return 0;
}
-static const struct phy_ops spacemit_usb2phy_ops = {
+static int spacemit_k3_usb2phy_disconnect(struct phy *phy, int port)
+{
+ struct spacemit_usb2phy *sphy = phy_get_drvdata(phy);
+
+ regmap_update_bits(sphy->regmap_base, PHY_K3_HS_HOST_DISC,
+ PHY_K3_HS_HOST_DISC_CLR, PHY_K3_HS_HOST_DISC_CLR);
+
+ return 0;
+}
+
+static const struct phy_ops spacemit_k1_usb2phy_ops = {
.init = spacemit_usb2phy_init,
.exit = spacemit_usb2phy_exit,
- .disconnect = spacemit_usb2phy_disconnect,
+ .disconnect = spacemit_k1_usb2phy_disconnect,
+ .owner = THIS_MODULE,
+};
+
+static const struct phy_ops spacemit_k3_usb2phy_ops = {
+ .init = spacemit_usb2phy_init,
+ .exit = spacemit_usb2phy_exit,
+ .disconnect = spacemit_k3_usb2phy_disconnect,
.owner = THIS_MODULE,
};
@@ -167,12 +187,15 @@ static int spacemit_usb2phy_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
struct spacemit_usb2phy *sphy;
+ const struct phy_ops *ops;
void __iomem *base;
sphy = devm_kzalloc(dev, sizeof(*sphy), GFP_KERNEL);
if (!sphy)
return -ENOMEM;
+ ops = device_get_match_data(dev);
+
sphy->clk = devm_clk_get_prepared(&pdev->dev, NULL);
if (IS_ERR(sphy->clk))
return dev_err_probe(dev, PTR_ERR(sphy->clk), "Failed to get clock\n");
@@ -185,7 +208,7 @@ static int spacemit_usb2phy_probe(struct platform_device *pdev)
if (IS_ERR(sphy->regmap_base))
return dev_err_probe(dev, PTR_ERR(sphy->regmap_base), "Failed to init regmap\n");
- sphy->phy = devm_phy_create(dev, NULL, &spacemit_usb2phy_ops);
+ sphy->phy = devm_phy_create(dev, NULL, ops);
if (IS_ERR(sphy->phy))
return dev_err_probe(dev, PTR_ERR(sphy->phy), "Failed to create phy\n");
@@ -196,7 +219,8 @@ static int spacemit_usb2phy_probe(struct platform_device *pdev)
}
static const struct of_device_id spacemit_usb2phy_dt_match[] = {
- { .compatible = "spacemit,k1-usb2-phy", },
+ { .compatible = "spacemit,k1-usb2-phy", .data = &spacemit_k1_usb2phy_ops },
+ { .compatible = "spacemit,k3-usb2-phy", .data = &spacemit_k3_usb2phy_ops },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, spacemit_usb2phy_dt_match);
--
2.52.0 | {
"author": "Yixun Lan <dlan@kernel.org>",
"date": "Sat, 14 Feb 2026 20:29:16 +0800",
"is_openbsd": false,
"thread_id": "aaGxX_3FXjM1LT6y@vaman.mbox.gz"
} |
lkml_critique | lkml | The series trys to add USB2 PHY support for SpacemiT K3 SoC,
while patch [2/3] implement a disconnect function which is
needed during next connection.
No DTS part has been inclueded in this series, instead I plan
to submit while adding USB host support.
Signed-off-by: Yixun Lan <dlan@kernel.org>
---
Changes in v2:
- collect ACK
- add Fixes tag
- implement separate phy_ops
- Link to v1: https://lore.kernel.org/r/20260212-11-k3-usb2-phy-v1-0-43578592405d@kernel.org
---
Yixun Lan (3):
dt-bindings: phy: spacemit: k3: add USB2 PHY support
phy: k1-usb: add disconnect function support
phy: k1-usb: k3: add USB2 PHY support
.../devicetree/bindings/phy/spacemit,usb2-phy.yaml | 6 ++-
drivers/phy/spacemit/phy-k1-usb2.c | 44 ++++++++++++++++++++--
2 files changed, 45 insertions(+), 5 deletions(-)
---
base-commit: dd39930f3d9c1d74a40b79d368e1f3d1555e919c
change-id: 20260124-11-k3-usb2-phy-c4630b990b1f
Best regards,
--
Yixun Lan <dlan@kernel.org>
| null | null | null | [PATCH v2 0/3] phy: spacemit: Add USB2 PHY support for K3 SoC | Hello Yixun,
On Sat, Feb 14, 2026 at 08:29:15PM +0800, Yixun Lan wrote:
Please align function arguments to the open parenthesis.
Since we are in the merge window, it is likely that new features will
not be picked up at this stage.
But this seems to be a fix for existing SpacemiT K1 support, currently
in the linux-phy/next branch. The linux-phy pull request hasn't been
sent yet, so if you can resend just this patch and we can get an ACK for
it in time, perhaps it can be included for v7.0.
The K3 support should be resent after the merge window. | {
"author": "Vladimir Oltean <olteanv@gmail.com>",
"date": "Mon, 16 Feb 2026 11:01:12 +0200",
"is_openbsd": false,
"thread_id": "aaGxX_3FXjM1LT6y@vaman.mbox.gz"
} |
lkml_critique | lkml | The series trys to add USB2 PHY support for SpacemiT K3 SoC,
while patch [2/3] implement a disconnect function which is
needed during next connection.
No DTS part has been inclueded in this series, instead I plan
to submit while adding USB host support.
Signed-off-by: Yixun Lan <dlan@kernel.org>
---
Changes in v2:
- collect ACK
- add Fixes tag
- implement separate phy_ops
- Link to v1: https://lore.kernel.org/r/20260212-11-k3-usb2-phy-v1-0-43578592405d@kernel.org
---
Yixun Lan (3):
dt-bindings: phy: spacemit: k3: add USB2 PHY support
phy: k1-usb: add disconnect function support
phy: k1-usb: k3: add USB2 PHY support
.../devicetree/bindings/phy/spacemit,usb2-phy.yaml | 6 ++-
drivers/phy/spacemit/phy-k1-usb2.c | 44 ++++++++++++++++++++--
2 files changed, 45 insertions(+), 5 deletions(-)
---
base-commit: dd39930f3d9c1d74a40b79d368e1f3d1555e919c
change-id: 20260124-11-k3-usb2-phy-c4630b990b1f
Best regards,
--
Yixun Lan <dlan@kernel.org>
| null | null | null | [PATCH v2 0/3] phy: spacemit: Add USB2 PHY support for K3 SoC | On 11:01 Mon 16 Feb , Vladimir Oltean wrote:
Ok
Sure, no problem and I expect this is normal..
Ok, done
http://lore.kernel.org/r/20260216152653.25244-1-dlan@kernel.org
will do once new -rc1 is tagged
--
Yixun Lan (dlan) | {
"author": "Yixun Lan <dlan@gentoo.org>",
"date": "Mon, 16 Feb 2026 23:29:54 +0800",
"is_openbsd": false,
"thread_id": "aaGxX_3FXjM1LT6y@vaman.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Fri Sep 5, 2025 at 4:00 PM CEST, Benno Lossin wrote:
I forgot to test with the right configuration and found some errors with
existing code. Here are their fixes. If I don't need to re-send, I will
add them on apply (if you want a v2, let me know).
---
Cheers,
Benno
diff --git a/rust/kernel/devres.rs b/rust/kernel/devres.rs
index da18091143a6..91dbf3f4b166 100644
--- a/rust/kernel/devres.rs
+++ b/rust/kernel/devres.rs
@@ -134,11 +134,9 @@ pub fn new<'a, E>(
T: 'a,
Error: From<E>,
{
- let callback = Self::devres_callback;
-
try_pin_init!(&this in Self {
dev: dev.into(),
- callback,
+ callback: Self::devres_callback,
// INVARIANT: `inner` is properly initialized.
inner <- {
// SAFETY: `this` is a valid pointer to uninitialized memory.
@@ -151,7 +149,7 @@ pub fn new<'a, E>(
// properly initialized, because we require `dev` (i.e. the *bound* device) to
// live at least as long as the returned `impl PinInit<Self, Error>`.
to_result(unsafe {
- bindings::devm_add_action(dev.as_raw(), Some(callback), inner.cast())
+ bindings::devm_add_action(dev.as_raw(), Some(*callback), inner.cast())
})?;
Opaque::pin_init(try_pin_init!(Inner {
diff --git a/samples/rust/rust_driver_pci.rs b/samples/rust/rust_driver_pci.rs
index 606946ff4d7f..1ac0b06fa3b3 100644
--- a/samples/rust/rust_driver_pci.rs
+++ b/samples/rust/rust_driver_pci.rs
@@ -78,8 +78,8 @@ fn probe(pdev: &pci::Device<Core>, info: &Self::IdInfo) -> Result<Pin<KBox<Self>
let drvdata = KBox::pin_init(
try_pin_init!(Self {
- pdev: pdev.into(),
bar <- pdev.iomap_region_sized::<{ Regs::END }>(0, c_str!("rust_driver_pci")),
+ pdev: pdev.into(),
index: *info,
}),
GFP_KERNEL, | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Fri, 05 Sep 2025 19:18:25 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Fri, Sep 05, 2025 at 04:00:46PM +0200, Benno Lossin wrote:
It's hard to review because of lack of examples. Could you or Danilo
share some sample usages? Thanks!
Regards,
Boqun
[...] | {
"author": "Boqun Feng <boqun.feng@gmail.com>",
"date": "Fri, 5 Sep 2025 10:21:50 -0700",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Fri, Sep 05, 2025 at 07:18:25PM +0200, Benno Lossin wrote:
[...]
Ok, this example is good enough for me to express the concern here: the
variable shadowing behavior seems not straightforward (maybe because in
normal Rust initalization expression, no binding is created for
previous variables, neither do we have a `let` here).
Would the future inplace initialization have the similar behavior? I
asked because a natural resolution is adding a special syntax like:
let a = ..;
try_pin_init!(Self {
b: a,
let a = a.into(); // create the new binding here.
c: a, // <- use the previous initalized `a`.
}
(Since I lost tracking of the discussion a bit, maybe there is a
previous discussion I've missed here?)
Regards,
Boqun | {
"author": "Boqun Feng <boqun.feng@gmail.com>",
"date": "Fri, 5 Sep 2025 10:44:23 -0700",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | (Cc: Alex)
On Fri Sep 5, 2025 at 7:21 PM CEST, Boqun Feng wrote:
Sure, here's an example. Eventually, it's going to be a bit more complicated,
but basically that's it.
#[pin_data(PinnedDrop)]
pub(crate) struct Gpu {
spec: Spec,
bar: Arc<Devres<Bar0>>,
sysmem_flush: SysmemFlush,
gsp_falcon: Falcon<Gsp>,
sec2_falcon: Falcon<Sec2>,
#[pin]
gsp: Gsp,
}
impl Gpu {
pub(crate) fn new(
dev: &Device<Bound>,
bar: Arc<Devres<Bar0>>,
) -> impl PinInit<Self, Error> + '_ {
try_pin_init(Self {
bar,
spec: Spec::new(bar.access(dev)?)?,
gsp_falcon: Falcon::<Gsp>::new(dev, spec.chipset)?,
sec2_falcon: Falcon::<Sec2>::new(dev, spec.chipset)?,
sysmem_flush: SysmemFlush::register(dev, bar.access(dev)?, spec.chipset)?
gsp <- Gsp::new(gsp_falcon, sec2_falcon, sysmem_flush)?,
})
}
}
Imagine how much unsafe pointer mess this needs without this patch. :) | {
"author": "\"Danilo Krummrich\" <dakr@kernel.org>",
"date": "Fri, 05 Sep 2025 20:38:11 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Fri Sep 5, 2025 at 7:44 PM CEST, Boqun Feng wrote:
Can you please clarify the example? I'm a bit confused that this is not a field
of Self, so currently this can just be written as:
try_pin_init!(Self {
b: a,
c: a.into,
})
Of course assuming that a is Clone, as the code above does as well.
So, if we are concerned by the variable shadowing, which I'm less concerned
about, maybe we can do this:
// The "original" `a` and `b`.
let a: A = ...;
let b: B = ...;
try_pin_init!(Self {
a, // Initialize the field only.
let b <- b, // Initialize the field and create a `&B` named `b`.
c: a.into(), // That's the "original" `a`.
d <- D::new(b), // Not the original `b`, but the pin-init one.
}) | {
"author": "\"Danilo Krummrich\" <dakr@kernel.org>",
"date": "Sat, 06 Sep 2025 12:52:22 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | Hi Benno,
kernel test robot noticed the following build errors:
[auto build test ERROR on 8f5ae30d69d7543eee0d70083daf4de8fe15d585]
url: https://github.com/intel-lab-lkp/linux/commits/Benno-Lossin/rust-pin-init-add-references-to-previously-initialized-fields/20250905-220242
base: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
patch link: https://lore.kernel.org/r/20250905140047.3325945-1-lossin%40kernel.org
patch subject: [PATCH] rust: pin-init: add references to previously initialized fields
config: x86_64-rhel-9.4-rust (https://download.01.org/0day-ci/archive/20250906/202509062218.Mo9Wsmcd-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
rustc: rustc 1.88.0 (6b00bc388 2025-06-23)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250906/202509062218.Mo9Wsmcd-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202509062218.Mo9Wsmcd-lkp@intel.com/
All errors (new ones prefixed by >>):
--> rust/kernel/devres.rs:154:66
|
154 | bindings::devm_add_action(dev.as_raw(), Some(callback), inner.cast())
| ---- ^^^^^^^^ expected fn pointer, found `&mut unsafe extern "C" fn(*mut c_void)`
| |
| arguments to this enum variant are incorrect
|
= note: expected fn pointer `unsafe extern "C" fn(_)`
found mutable reference `&mut unsafe extern "C" fn(_)`
help: the type constructed contains `&mut unsafe extern "C" fn(*mut ffi::c_void)` due to the type of the argument passed
--> rust/kernel/devres.rs:154:61
|
154 | bindings::devm_add_action(dev.as_raw(), Some(callback), inner.cast())
| ^^^^^--------^
| |
| this argument influences the type of `Some`
note: tuple variant defined here
--> /opt/cross/rustc-1.88.0-bindgen-0.72.0/rustup/toolchains/1.88.0-x86_64-unknown-linux-gnu/lib/rustlib/src/rust/library/core/src/option.rs:597:5
|
597 | Some(#[stable(feature = "rust1", since = "1.0.0")] T),
| ^^^^
help: consider dereferencing the borrow
|
154 | bindings::devm_add_action(dev.as_raw(), Some(*callback), inner.cast())
| +
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki | {
"author": "kernel test robot <lkp@intel.com>",
"date": "Sat, 6 Sep 2025 22:23:15 +0800",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sat, Sep 06, 2025 at 12:52:22PM +0200, Danilo Krummrich wrote:
Oh, I could have been more clear: `a` is a field of `Self`, and the
`let` part initalizes it.
I'm not that concerned to block this, but it does look to me like we are
inventing a new way (and even a different syntax because normal Rust
initialization doesn't create new bindings) to create binding, so I
think I should bring it up.
This looks good to me as well. But I'm curious what the syntax would be
like in the in-place placement language feature in the future.
Regards,
Boqun | {
"author": "Boqun Feng <boqun.feng@gmail.com>",
"date": "Sat, 6 Sep 2025 18:57:04 -0700",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sat, Sep 06, 2025 at 06:57:04PM -0700, Boqun Feng wrote:
Another idea is using `&this`:
try_pin_init!(&this in Self {
a, // Initialize the field only.
b <- b, // Initialize the field only.
c: a.into(), // That's the "original" `a`.
d <- D::new(this->b), // Not the original `b`, but the pin-init one.
})
, like a special field projection during initialization.
Regards,
Boqun | {
"author": "Boqun Feng <boqun.feng@gmail.com>",
"date": "Sat, 6 Sep 2025 19:07:05 -0700",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sun Sep 7, 2025 at 4:07 AM CEST, Boqun Feng wrote:
The main issue with new syntax is the difficulty of implementing it. The
let one is fine, but it's pretty jarring & doesn't get formatted by
rustfmt (which I want to eventually have). Using `this` does look better
IMO, but it's near impossible to implement using declarative macros
(even using `syn` it seems difficult to me). So either we find some way
to express it in existing rust syntax (maybe use an attribute?), or we
just keep it this way.
Maybe Gary has some ideas on how to implement it.
---
Cheers,
Benno | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Sun, 07 Sep 2025 10:41:48 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sun Sep 7, 2025 at 10:41 AM CEST, Benno Lossin wrote:
I also thought about reusing `this`, but I think we should not reuse it. We
still need it to get pointers to uninitialized fields.
Surely, we could say that we provide this.as_ptr() to get the NonNull `this`
is currently defined to be and otherwise make it expose only the initialized
fields for a certain scope.
But as you say, that sounds tricky to implement and is probably not very
intuitive either. I'd rather say keep it as it is, if we don't want something
like the `let b <- b` syntax I proposed for formatting reasons. | {
"author": "\"Danilo Krummrich\" <dakr@kernel.org>",
"date": "Sun, 07 Sep 2025 19:29:45 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sun Sep 7, 2025 at 7:29 PM CEST, Danilo Krummrich wrote:
I have some ideas of changing the syntax to be more closure-esque:
init!(|this| -> Result<MyStruct, Error> {
let x = 42;
MyStruct {
x,
}
})
There we could add another parameter, that would then serve this
purpose. We should also probably rename `this` to `slot` & then use
`this` for the initialized version.
But as I said before, implementing the `this` thing from a macro
perspective is rather difficult (I have two ideas on how to do it and
both are bad...).
I don't feel like that's conveying the correct thing, it looks as if you
are only declaring a local variable.
---
Cheers,
Benno | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Sun, 07 Sep 2025 23:06:21 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sun Sep 7, 2025 at 11:06 PM CEST, Benno Lossin wrote:
I think that's a pretty good idea, but the part that I think is a little bit
confusing remains: `this` will need to have different fields depending on where
it's accessed.
Yeah, it's not great, but given that it's a custom syntax it also does not
create wrong expectations I'd say.
Anyways, I'm fine with either. For now we probably want to land the version as
it is and revisit once you settle on the syntax rework you mentioned above. | {
"author": "\"Danilo Krummrich\" <dakr@kernel.org>",
"date": "Sun, 07 Sep 2025 23:39:13 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sun Sep 7, 2025 at 11:39 PM CEST, Danilo Krummrich wrote:
Yeah (that's also the main issue with the macro implementation).
I'd say it looks like combining the `<-` operation already used by the
`init!` macro & a `let` binding. Thus introducing a local variable
that's (pin) initialized in-place. Not a field of the current struct.
I actually came up with a third option that looks best IMO:
init!(MyStruct {
x: 42,
#[with_binding]
y: 24,
z: *y,
})
The `#[with_binding]` attribute makes the macro generate a variable `y`.
`x` & `z` don't give access to their value. (we of course should come up
with a better name).
Any thoughts?
---
Cheers,
Benno | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Mon, 08 Sep 2025 00:51:02 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Mon Sep 8, 2025 at 12:51 AM CEST, Benno Lossin wrote:
It may be a bit verbose is some cases, but it makes things pretty obvious, so
LGTM.
How about just #[bind] or #[access]? | {
"author": "\"Danilo Krummrich\" <dakr@kernel.org>",
"date": "Mon, 08 Sep 2025 01:33:26 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Mon, Sep 08, 2025 at 01:33:26AM +0200, Danilo Krummrich wrote:
#[shadow] or #[maybe_rebind] ? Or #[pin_ref], the last one is clear
about the purpose.
Regards,
Boqun | {
"author": "Boqun Feng <boqun.feng@gmail.com>",
"date": "Sun, 7 Sep 2025 19:08:29 -0700",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Mon Sep 8, 2025 at 4:08 AM CEST, Boqun Feng wrote:
I like `#[bind]`.
Hmm in `init!` it's never pinned.
---
Cheers,
Benno | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Mon, 08 Sep 2025 10:27:40 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Mon Sep 8, 2025 at 10:27 AM CEST, Benno Lossin wrote:
I thought about #[shadow] as well, but it is not really accurate I think, as we
might not shadow anything. #[maybe_rebind] sounds a bit like it conditionally
rebinds, as in "it may not do anything", but it always binds.
So, I think it should one clear instruction, i.e. #[bind], #[access], #[ref],
#[use], #[let], etc. | {
"author": "\"Danilo Krummrich\" <dakr@kernel.org>",
"date": "Mon, 08 Sep 2025 10:57:36 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Mon, Sep 08, 2025 at 10:57:36AM +0200, Danilo Krummrich wrote:
In that sense I think `#[let]` is best? Because it indicates this field
initialization works as a `let`-statement (in term of creating a new
binding), of course I don't have strong ojections against other options.
Regards,
Boqun | {
"author": "Boqun Feng <boqun.feng@gmail.com>",
"date": "Mon, 8 Sep 2025 12:38:04 -0700",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Mon Sep 8, 2025 at 9:38 PM CEST, Boqun Feng wrote:
Ultimately I decided to go with `#[bind]`, since I felt like `#[let]`
might be confused with just having a let statement (ie replacing the
assignment with a let binding).
`#[bind]` also might be confused with some device binding I guess, but
we can rename it's too bad.
---
Cheers,
Benno | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Wed, 10 Sep 2025 12:12:23 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Fri Sep 5, 2025 at 4:00 PM CEST, Benno Lossin wrote:
Applied to pin-init-next, thanks everyone!
Included the fixes to devres & rust_driver_pci.
---
Cheers,
Benno | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Thu, 11 Sep 2025 23:35:53 +0200",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Fri, Sep 05, 2025 at 04:00:46PM +0200, Benno Lossin wrote:
just as a heads up: creating references broke part of the agx firmware
init structs which uses a `#[repr(C, packed)]` struct as field in
another struct. This fails with
| error[E0793]: reference to packed field is unaligned
| --> ../drivers/gpu/drm/asahi/initdata.rs:722:28
| |
| 722 | sub <- try_init!(raw::GlobalsSub::ver {
| | ____________________________^
| 723 | | unk_54: cfg.global_unk_54,
| 724 | | unk_56: 40,
| 725 | | unk_58: 0xffff,
| ... |
| 731 | | ..Zeroable::init_zeroed()
| 732 | | }),
| | |______________________^
| |
| = note: packed structs are only aligned by one byte, and many modern architectures penalize unaligned field accesses
| = note: creating a misaligned reference is undefined behavior (even if that reference is never dereferenced)
| = help: copy the field contents to a local variable, or replace the reference with a raw pointer and use `read_unaligned`/`write_unaligned` (loads and stores via `*p` must be properly aligned even when using raw pointers)
| = note: this error originates in the macro `$crate::__init_internal` which comes from the expansion of the macro `try_init` (in Nightly builds, run with -Z macro-backtrace for more info)
This was easy enough to work around, I don't see how this embedding of a
`#[repr(C, packed)]` struct was necessary or at least helpful. The code
is not expected to be included in the upstream driver so it no worth
spending effort on this.
I don't think it's likely that anyone else will run into this but I
thought I mention it at least.
The asahi driver also ran into the discussed variable shadowing issue (a
variable was used to initialize a field of the same name and was later
used to initialize another field). This was trivially fixed by renaming
the variable.
Janne | {
"author": "Janne Grunau <j@jannau.net>",
"date": "Wed, 3 Dec 2025 23:05:16 +0100",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Wed Dec 3, 2025 at 11:05 PM CET, Janne Grunau wrote:
Thanks for the report, I expected the latter kind of error, but was not
aware of the packed struct issue. If anyone needs a proper workaround
from pin-init, let me know.
Cheers,
Benno | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Sat, 06 Dec 2025 09:23:08 +0100",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sat, Dec 06, 2025 at 09:23:08AM +0100, Benno Lossin wrote:
I spoke too soon. The packed struct issue is also present in the capture
driver for Macbook microphones (aop_audio). Working around this issue
there is less obvious and more effort. I think it might be enough to use
unaligned u32 / u64 types already present in asahi [1].
I'm not sure how prevalent packed structs are outside of Apple's
firmware interfaces. I was surprised running into the same issue in a
second driver but I shouldn't have been. There are plans for another
driver where this isssue will be present.
A workaround on pin-init side would be appreciated. Due to the nature of
these packed structs I do not see a need to have access to previously
initialized fields. An optional way to supress the references would be
good enough for the cases I'm aware off.
Thanks
Janne
1: https://github.com/AsahiLinux/linux/blob/asahi-6.17.9-1/drivers/gpu/drm/asahi/fw/types.rs#L48 | {
"author": "Janne Grunau <j@jannau.net>",
"date": "Sat, 6 Dec 2025 18:02:14 +0100",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sat Dec 6, 2025 at 6:02 PM CET, Janne Grunau wrote:
FYI, I am merging a workaround this cycle, see [1]. Gary had a good idea
for a future patch series which I am tracking in [2].
Cheers,
Benno
[1]: https://lore.kernel.org/all/20260111122554.2662175-14-lossin@kernel.org
[2]: https://github.com/Rust-for-Linux/pin-init/issues/98 | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Sun, 11 Jan 2026 18:06:34 +0100",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml | After initializing a field in an initializer macro, create a variable
holding a reference that points at that field. The type is either
`Pin<&mut T>` or `&mut T` depending on the field's structural pinning
kind.
Link: https://github.com/Rust-for-Linux/pin-init/pull/83/commits/0f658594c39398f58cd5cb99a8141e370e225e74
Signed-off-by: Benno Lossin <lossin@kernel.org>
---
rust/pin-init/src/macros.rs | 149 ++++++++++++++++++++++++++++--------
1 file changed, 115 insertions(+), 34 deletions(-)
diff --git a/rust/pin-init/src/macros.rs b/rust/pin-init/src/macros.rs
index 9ced630737b8..1100c5a0b3de 100644
--- a/rust/pin-init/src/macros.rs
+++ b/rust/pin-init/src/macros.rs
@@ -988,38 +988,56 @@ fn drop(&mut self) {
@pinned($($(#[$($p_attr:tt)*])* $pvis:vis $p_field:ident : $p_type:ty),* $(,)?),
@not_pinned($($(#[$($attr:tt)*])* $fvis:vis $field:ident : $type:ty),* $(,)?),
) => {
- // For every field, we create a projection function according to its projection type. If a
- // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
- // structurally pinned, then it can be initialized via `Init`.
- //
- // The functions are `unsafe` to prevent accidentally calling them.
- #[allow(dead_code)]
- #[expect(clippy::missing_safety_doc)]
- impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
- where $($whr)*
- {
- $(
- $(#[$($p_attr)*])*
- $pvis unsafe fn $p_field<E>(
- self,
- slot: *mut $p_type,
- init: impl $crate::PinInit<$p_type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::PinInit::__pinned_init(init, slot) }
- }
- )*
- $(
- $(#[$($attr)*])*
- $fvis unsafe fn $field<E>(
- self,
- slot: *mut $type,
- init: impl $crate::Init<$type, E>,
- ) -> ::core::result::Result<(), E> {
- // SAFETY: TODO.
- unsafe { $crate::Init::__init(init, slot) }
- }
- )*
+ $crate::macros::paste! {
+ // For every field, we create a projection function according to its projection type. If a
+ // field is structurally pinned, then it must be initialized via `PinInit`, if it is not
+ // structurally pinned, then it can be initialized via `Init`.
+ //
+ // The functions are `unsafe` to prevent accidentally calling them.
+ #[allow(dead_code)]
+ #[expect(clippy::missing_safety_doc)]
+ impl<$($impl_generics)*> $pin_data<$($ty_generics)*>
+ where $($whr)*
+ {
+ $(
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn $p_field<E>(
+ self,
+ slot: *mut $p_type,
+ init: impl $crate::PinInit<$p_type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::PinInit::__pinned_init(init, slot) }
+ }
+
+ $(#[$($p_attr)*])*
+ $pvis unsafe fn [<__project_ $p_field>]<'__slot>(
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
+ ::core::pin::Pin::new_unchecked(slot)
+ }
+ )*
+ $(
+ $(#[$($attr)*])*
+ $fvis unsafe fn $field<E>(
+ self,
+ slot: *mut $type,
+ init: impl $crate::Init<$type, E>,
+ ) -> ::core::result::Result<(), E> {
+ // SAFETY: TODO.
+ unsafe { $crate::Init::__init(init, slot) }
+ }
+
+ $(#[$($attr)*])*
+ $fvis unsafe fn [<__project_ $field>]<'__slot>(
+ self,
+ slot: &'__slot mut $type,
+ ) -> &'__slot mut $type {
+ slot
+ }
+ )*
+ }
}
};
}
@@ -1216,6 +1234,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// return when an error/panic occurs.
// We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1247,6 +1272,14 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: `slot` is valid, because we are inside of an initializer closure, we
// return when an error/panic occurs.
unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = unsafe { &mut (*$slot).$field };
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1265,7 +1298,48 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
);
}
};
- (init_slot($($use_data:ident)?):
+ (init_slot(): // No `use_data`, so all fields are not structurally pinned
+ @data($data:ident),
+ @slot($slot:ident),
+ @guards($($guards:ident,)*),
+ // Init by-value.
+ @munch_fields($field:ident $(: $val:expr)?, $($rest:tt)*),
+ ) => {
+ {
+ $(let $field = $val;)?
+ // Initialize the field.
+ //
+ // SAFETY: The memory at `slot` is uninitialized.
+ unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+ }
+
+ #[allow(unused_variables)]
+ // SAFETY:
+ // - the field is not structurally pinned, since no `use_data` was required to create this
+ // initializer,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+ //
+ // We rely on macro hygiene to make it impossible for users to access this local variable.
+ // We use `paste!` to create new hygiene for `$field`.
+ $crate::macros::paste! {
+ // SAFETY: We forget the guard later when initialization has succeeded.
+ let [< __ $field _guard >] = unsafe {
+ $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+ };
+
+ $crate::__init_internal!(init_slot():
+ @data($data),
+ @slot($slot),
+ @guards([< __ $field _guard >], $($guards,)*),
+ @munch_fields($($rest)*),
+ );
+ }
+ };
+ (init_slot($use_data:ident):
@data($data:ident),
@slot($slot:ident),
@guards($($guards:ident,)*),
@@ -1279,6 +1353,13 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
// SAFETY: The memory at `slot` is uninitialized.
unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
}
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+ #[allow(unused_variables)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
// Create the drop guard:
//
// We rely on macro hygiene to make it impossible for users to access this local variable.
@@ -1289,7 +1370,7 @@ fn assert_zeroable<T: $crate::Zeroable>(_: *mut T) {}
$crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
};
- $crate::__init_internal!(init_slot($($use_data)?):
+ $crate::__init_internal!(init_slot($use_data):
@data($data),
@slot($slot),
@guards([< __ $field _guard >], $($guards,)*),
base-commit: 8f5ae30d69d7543eee0d70083daf4de8fe15d585
--
2.50.1
| null | null | null | [PATCH] rust: pin-init: add references to previously initialized fields | On Sun Jan 11, 2026 at 6:06 PM CET, Benno Lossin wrote:
Update on this situation, we have a problem: packed struct and the
current version of pin-init are unsound, so I'll sadly have to remove
the current workaround attribute. That is because the `[Pin]Init` trait
requires an aligned pointer as the input. Fixing this requires
introducing a new hierarchy for the Init trait that supports misaligned
writes.
If you really require this feature, then we can work something out.
Would you mind giving me a pointer to the code that you're currently
using or that you would like to support?
Cheers,
Benno | {
"author": "\"Benno Lossin\" <lossin@kernel.org>",
"date": "Fri, 27 Feb 2026 16:02:43 +0100",
"is_openbsd": false,
"thread_id": "DGPU3530XZA3.3SY27R9VIC9Y9@kernel.org.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On Fri, 19 Dec 2025 11:15:02 +0100
Peter Zijlstra <peterz@infradead.org> wrote:
I think you can just make this:
default PREEMPT_LAZY
and remove the PREEMPT_NONE.
As PREEMPT_NONE now depends on ARCH_NO_PREEMPT and all the other options
depend on !ARCH_NO_PREEMPT, the default will be PREEMPT_LAZY if it's
available, but it will never be PREEMPT_NONE if it isn't unless
PREEMPT_NONE is the only option available.
I added default PREEMPT_LAZY and did a:
$ mkdir /tmp/build
$ make O=/tmp/build ARCH=alpha defconfig
And the result is:
CONFIG_PREEMPT_NONE_BUILD=y
CONFIG_PREEMPT_NONE=y
-- Steve | {
"author": "Steven Rostedt <rostedt@goodmis.org>",
"date": "Tue, 6 Jan 2026 11:40:51 -0500",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | Hi Peter.
On 12/19/25 3:45 PM, Peter Zijlstra wrote:
Maybe only change the default to LAZY, but keep other options possible
via dynamic update?
- When the kernel changes to lazy being the default, the scheduling
pattern can change and it may affect the workloads. having ability to
dynamically change to none/voluntary could help one to figure out where
it is regressing. we could document cases where regression is expected.
- with preempt=full/lazy we will likely never see softlockups. How are
we going to find out longer kernel paths(some maybe design, some may be
bugs) apart from observing workload regression?
Also, is softlockup code is of any use in preempt=full/lazy? | {
"author": "Shrikanth Hegde <sshegde@linux.ibm.com>",
"date": "Fri, 9 Jan 2026 16:53:04 +0530",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | The following commit has been merged into the sched/core branch of tip:
Commit-ID: 7dadeaa6e851e7d67733f3e24fc53ee107781d0f
Gitweb: https://git.kernel.org/tip/7dadeaa6e851e7d67733f3e24fc53ee107781d0f
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Thu, 18 Dec 2025 15:25:10 +01:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Thu, 08 Jan 2026 12:43:57 +01:00
sched: Further restrict the preemption modes
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
managable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy.
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://patch.msgid.link/20251219101502.GB1132199@noisy.programming.kicks-ass.net
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index da32680..88c594c 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5b17d8e..fa72075 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynamic_undefined;
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 41caa22..5f9b771 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struct file *filp, const char __user *ubuf,
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */ | {
"author": "\"tip-bot2 for Peter Zijlstra\" <tip-bot2@linutronix.de>",
"date": "Mon, 12 Jan 2026 08:03:23 -0000",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On 19/12/2025 10:15, Peter Zijlstra wrote:
Hi Peter,
We are observing a performance regression on s390 since enabling PREEMPT_LAZY.
Test Environment
Architecture: s390
Setup:
Single KVM host running two identical guests
Guests are connected virtually via Open vSwitch
Workload: uperf streaming read test with 50 parallel connections
One guest acts as the uperf client, the other as the server
Open vSwitch configuration:
OVS bridge with two ports
Guests attached via virtio‑net
Each guest configured with 4 vhost‑queues
Problem Description
When comparing PREEMPT_LAZY against full PREEMPT, we see a substantial drop in throughput—on some systems up to 50%.
Observed Behaviour
By tracing packets inside Open vSwitch (ovs_do_execute_action), we see:
Packet drops
Retransmissions
Reductions in packet size (from 64K down to 32K)
Capturing traffic inside the VM and inspecting it in Wireshark shows the following TCP‑level differences between PREEMPT_FULL and PREEMPT_LAZY:
|--------------------------------------+--------------+--------------+------------------|
| Wireshark Warning / Note | PREEMPT_FULL | PREEMPT_LAZY | (lazy vs full) |
|--------------------------------------+--------------+--------------+------------------|
| D-SACK Sequence | 309 | 2603 | ×8.4 |
| Partial Acknowledgement of a segment | 54 | 279 | ×5.2 |
| Ambiguous ACK (Karn) | 32 | 747 | ×23 |
| (Suspected) spurious retransmission | 205 | 857 | ×4.2 |
| (Suspected) fast retransmission | 54 | 1622 | ×30 |
| Duplicate ACK | 504 | 3446 | ×6.8 |
| Packet length exceeds MSS (TSO/GRO) | 13172 | 34790 | ×2.6 |
| Previous segment(s) not captured | 9205 | 6730 | -27% |
| ACKed segment that wasn't captured | 7022 | 8272 | +18% |
| (Suspected) out-of-order segment | 436 | 303 | -31% |
|--------------------------------------+--------------+--------------+------------------|
This pattern indicates reordering, loss, or scheduling‑related delays, but it is still unclear why PREEMPT_LAZY is causing this behaviour in this workload.
Additional observations:
Monitoring the guest CPU run time shows that it drops from 16% with PREEMPT_FULL to 9% with PREEMPT_LAZY.
The workload is dominated by voluntary preemption (schedule()), and PREEMPT_LAZY is, as far as I understand, mainly concerned with forced preemption.
It is therefore not obvious why PREEMPT_LAZY has an impact here.
Changing guest configuration to disable mergeable RX buffers:
<host mrg_rxbuf="off"/>
had a clear effect on throughput:
PREEMPT_LAZY: throughput improved from 40 Gb/s → 60 Gb/s | {
"author": "Ciunas Bennett <ciunas@linux.ibm.com>",
"date": "Tue, 24 Feb 2026 15:45:39 +0000",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On 2026-02-24 15:45:39 [+0000], Ciunas Bennett wrote:
PREEMPT_FULL schedules immediately if there is a preemption request
either due to a wake up of a task, or because the time slice is used up
(while in kernel).
PREEMPT_LAZY delays the preemption request, caused by the scheduling
event, either until the task returns to userland or the next HZ tick.
The voluntary schedule() invocation shouldn't be affected by FULL -> LAZY
but I guess FULL scheduled more often after a wake up which is in
favour.
Does this bring the workload/test up to PREEMPT_FULL level?
Sebastian | {
"author": "Sebastian Andrzej Siewior <bigeasy@linutronix.de>",
"date": "Tue, 24 Feb 2026 18:11:11 +0100",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On 2/24/26 16:45, Ciunas Bennett wrote:
When I look at top sched_switch kstacks on s390 with this workload, 20%
of them are worker_thread() -> schedule(), both with CONFIG_PREEMPT and
CONFIG_PREEMPT_LAZY. The others are vhost and idle.
On x86 I see only vhost and idle, but not worker_thread().
According to runqlat.bt, average run queue latency goes up from 4us to
18us when switching from CONFIG_PREEMPT to CONFIG_PREEMPT_LAZY.
I modified the script to show per-comm latencies, and it shows
that worker_thread() is disproportionately penalized: the latency
increases from 2us to 60us!
For vhost it's better: 5us -> 2us, and for KVM it's better too: 8us -> 2us.
Finally, what is the worker doing? I looked at __queue_work() kstacks,
and they all come from irqfd_wakeup().
irqfd_wakeup() calls arch-specific kvm_arch_set_irq_inatomic(), which is
implemented on x86 and not implemented on s390.
This may explain why we on s390 are the first to see this.
Christian, do you think if it would make sense to
implement kvm_arch_set_irq_inatomic() on s390? | {
"author": "Ilya Leoshkevich <iii@linux.ibm.com>",
"date": "Wed, 25 Feb 2026 03:30:04 +0100",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On 24/02/2026 17:11, Sebastian Andrzej Siewior wrote:
Sorry was not clear here, so when I enable this there is also an improvement in PREEMPT_FULL
from 55Gb/s -> 60Gb/s
So I see an improvement in both test cases.
PREEMPT_LAZY: throughput improved from 40 Gb/s → 60 Gb/s
PREEMPT_FULL: throughput improved from 55 Gb/s → 60 Gb/s | {
"author": "Ciunas Bennett <ciunas@linux.ibm.com>",
"date": "Wed, 25 Feb 2026 09:56:29 +0000",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On Fri, Jan 09, 2026 at 04:53:04PM +0530, Shrikanth Hegde wrote:
I suppose we could do this. I just worry people will end up with 'echo
voluntary > /debug/sched/preempt' in their startup script, rather than
trying to actually debug their issues.
Anybody with enough knowledge to be useful, can edit this line on their
own, rebuild the kernel and go forth.
Also, I've already heard people are interested in compile-time removing
of cond_resched() infrastructure for ARCH_HAS_PREEMPT_LAZY, so this
would be short lived indeed.
Given the utter cargo cult placement of cond_resched(); I don't think
we've actually lost much here. You wouldn't have seen the softlockup
thing anyway, because of cond_resched().
Anyway, you can always build on top of function graph tracing, create a
flame graph of stuff and see just where all your runtime went. I'm sure
there's tools that do this already. Perhaps if you're handy with the BPF
stuff you can even create a 'watchdog' of sorts that will scream if any
function takes longer than X us to run or whatever.
Oh, that reminds me, Steve, would it make sense to have
task_struct::se.sum_exec_runtime as a trace-clock?
Softlockup has always seemed of dubious value to me -- then again, I've
been running preempt=y kernels from about the day that became an option
:-)
I think it still trips if you lose a wakeup or something.
"author": "Peter Zijlstra <peterz@infradead.org>",
"date": "Wed, 25 Feb 2026 11:53:45 +0100",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On 2/25/26 4:23 PM, Peter Zijlstra wrote:
Ack.
That's probably hungtask report right?
IIUC that would be independent of preemption model. | {
"author": "Shrikanth Hegde <sshegde@linux.ibm.com>",
"date": "Wed, 25 Feb 2026 18:26:07 +0530",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | Am 24.02.26 um 21:30 schrieb Ilya Leoshkevich:
So in fact Doug is working on that at the moment. There are some corner
cases where we had concerns as we have to pin the guest pages holding
the interrupt bits. This was secure execution, I need to followup if
we have already solved those cases. But we can try if the current patch
will help this particular problem.
If yes, then we can try to speed up the work on this.
Christian | {
"author": "Christian Borntraeger <borntraeger@linux.ibm.com>",
"date": "Wed, 25 Feb 2026 11:33:31 -0500",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On 2/25/26 11:33 AM, Christian Borntraeger wrote:
Christian, the patch is very close to ready. The last step, I rebased on
Master today to pickup the latest changes to interrupt.c. I am building
that now and will test for non-SE and SE environments. I have been
testing my solution for SE environments for a few weeks and it seems to
cover the use cases I have tested. | {
"author": "Douglas Freimuth <freimuth@linux.ibm.com>",
"date": "Wed, 25 Feb 2026 13:30:13 -0500",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On Wed, 25 Feb 2026 11:53:45 +0100
Peter Zijlstra <peterz@infradead.org> wrote:
That's unique per task right? As tracing is global it requires the
clock to be monotonic, and I'm guessing a single sched_switch will
break that.
Now if one wants to trace how long kernel paths are, I'm sure we could
trivially make a new tracer to do so.
echo max_kernel_time > current_tracer
or something like that, that could act like a latency tracer that
monitors how long any kernel thread runs without being preempted.
-- Steve | {
"author": "Steven Rostedt <rostedt@goodmis.org>",
"date": "Wed, 25 Feb 2026 19:48:09 -0500",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On 2/26/26 6:18 AM, Steven Rostedt wrote:
That is good idea.
With preempt=full/lazy a long running kernel task can get
preempted if it is running in preemptible section. that's okay.
My intent was to have a tracer that can say, look this kernel task took this much time
before it completed. For some task such as long page walk, we know it is okay since
it is expected to take time, but for some task such as reading watchdog shouldn't take
time. But on large system's doing these global variable update itself may take a long time.
Updating less often was a fix which had fixed that lockup IIRC. So how can we identify such
opportunities. Hopefully I am making sense.
Earlier, one would have got a softlockup when things were making very slow progress(one's
which didn't have a cond_resched)
Now, we don't know unless we see a workload regression.
If we don't have a tracer/mechanism today which gives kernel_tasks > timelimit,
then having a new one would help. | {
"author": "Shrikanth Hegde <sshegde@linux.ibm.com>",
"date": "Thu, 26 Feb 2026 11:00:14 +0530",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On Thu, 26 Feb 2026 11:00:14 +0530
Shrikanth Hegde <sshegde@linux.ibm.com> wrote:
Yeah, I think something like this should be added now that LAZY will
prevent us from knowing where in the kernel is really going on for a long
time.
Tracers can be set to only watch a single task. The function and function
graph tracers use set_ftrace_pid. I could extend that to other tracers.
Hmm, that may even be useful for the preemptirq tracer!
Not really. Can you explain in more detail, or specific examples of what
constitutes a path you want to trace and one that you do not?
-- Steve | {
"author": "Steven Rostedt <rostedt@goodmis.org>",
"date": "Thu, 26 Feb 2026 12:22:52 -0500",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | Hi Steven.
On 2/26/26 10:52 PM, Steven Rostedt wrote:
That would be the goal.
That was a hardlockup. wrong example.
All I was saying, there have been fixes which solved softlockup issues
without using cond_resched. But seeing softlockup was important to know
that issue existed.
Some reference commit I think that did this;
a8c861f401b4 xfs: avoid busy loops in GCD
e1b849cfa6b6 writeback: Avoid contention on wb->list_lock when switching inodes
0ddfb62f5d01 fix the softlockups in attach_recursive_mnt()
I am afraid we will have trace all functions to begin with (which is expensive), but filter
out those which took minimal time (like less than a 1s or so). that would eventually leave only a
few functions that actually took more than 1s(that should have limited overhead). | {
"author": "Shrikanth Hegde <sshegde@linux.ibm.com>",
"date": "Fri, 27 Feb 2026 14:39:42 +0530",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | On Fri, 27 Feb 2026 14:39:42 +0530
Shrikanth Hegde <sshegde@linux.ibm.com> wrote:
Well, I think the detection can be done with timings between schedules.
What's the longest running task without any voluntary schedule. Then you
can add function graph tracing to it where it can possibly trigger in the
location that detected the issue.
On a detection of a long schedule, a stack trace can be recorded. Using
that stack trace, you could use the function graph tracer to see what is
happening.
Anyway, something to think about, and this could be a topic at this years
Linux Plumbers Tracing MC ;-)
-- Steve | {
"author": "Steven Rostedt <rostedt@goodmis.org>",
"date": "Fri, 27 Feb 2026 09:53:34 -0500",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml |
[ with 6.18 being an LTS release, it might be a good time for this ]
The introduction of PREEMPT_LAZY was for multiple reasons:
- PREEMPT_RT suffered from over-scheduling, hurting performance compared to
!PREEMPT_RT.
- the introduction of (more) features that rely on preemption; like
folio_zero_user() which can do large memset() without preemption checks.
(Xen already had a horrible hack to deal with long running hypercalls)
- the endless and uncontrolled sprinkling of cond_resched() -- mostly cargo
cult or in response to poor to replicate workloads.
By moving to a model that is fundamentally preemptable these things become
manageable and avoid needing to introduce more horrible hacks.
Since this is a requirement; limit PREEMPT_NONE to architectures that do not
support preemption at all. Further limit PREEMPT_VOLUNTARY to those
architectures that do not yet have PREEMPT_LAZY support (with the eventual goal
to make this the empty set and completely remove voluntary preemption and
cond_resched() -- notably VOLUNTARY is already limited to !ARCH_NO_PREEMPT.)
This leaves up-to-date architectures (arm64, loongarch, powerpc, riscv, s390,
x86) with only two preemption models: full and lazy (like PREEMPT_RT).
While Lazy has been the recommended setting for a while, not all distributions
have managed to make the switch yet. Force things along. Keep the patch minimal
in case of hard to address regressions that might pop up.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/Kconfig.preempt | 3 +++
kernel/sched/core.c | 2 +-
kernel/sched/debug.c | 2 +-
3 files changed, 5 insertions(+), 2 deletions(-)
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -16,11 +16,13 @@ config ARCH_HAS_PREEMPT_LAZY
choice
prompt "Preemption Model"
+ default PREEMPT_LAZY if ARCH_HAS_PREEMPT_LAZY
default PREEMPT_NONE
config PREEMPT_NONE
bool "No Forced Preemption (Server)"
depends on !PREEMPT_RT
+ depends on ARCH_NO_PREEMPT
select PREEMPT_NONE_BUILD if !PREEMPT_DYNAMIC
help
This is the traditional Linux preemption model, geared towards
@@ -35,6 +37,7 @@ config PREEMPT_NONE
config PREEMPT_VOLUNTARY
bool "Voluntary Kernel Preemption (Desktop)"
+ depends on !ARCH_HAS_PREEMPT_LAZY
depends on !ARCH_NO_PREEMPT
depends on !PREEMPT_RT
select PREEMPT_VOLUNTARY_BUILD if !PREEMPT_DYNAMIC
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7553,7 +7553,7 @@ int preempt_dynamic_mode = preempt_dynam
int sched_dynamic_mode(const char *str)
{
-# ifndef CONFIG_PREEMPT_RT
+# if !(defined(CONFIG_PREEMPT_RT) || defined(CONFIG_ARCH_HAS_PREEMPT_LAZY))
if (!strcmp(str, "none"))
return preempt_dynamic_none;
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -243,7 +243,7 @@ static ssize_t sched_dynamic_write(struc
static int sched_dynamic_show(struct seq_file *m, void *v)
{
- int i = IS_ENABLED(CONFIG_PREEMPT_RT) * 2;
+ int i = (IS_ENABLED(CONFIG_PREEMPT_RT) || IS_ENABLED(CONFIG_ARCH_HAS_PREEMPT_LAZY)) * 2;
int j;
/* Count entries in NULL terminated preempt_modes */
| null | null | null | [PATCH] sched: Further restrict the preemption modes | Hi Steve.
On 2/27/26 8:23 PM, Steven Rostedt wrote:
Yep. Will try to do this.
Someone from our tracing team wanted to give this a try too. Lets see. | {
"author": "Shrikanth Hegde <sshegde@linux.ibm.com>",
"date": "Fri, 27 Feb 2026 20:58:33 +0530",
"is_openbsd": false,
"thread_id": "b0e30f81-b06c-451b-abdc-ede71fa4a96b@linux.ibm.com.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas'
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | Endpoint controller drivers may integrate auxiliary blocks (e.g. DMA
engines) whose register windows and descriptor memories metadata need to
be exposed to a remote peer. Endpoint function drivers need a generic
way to discover such resources without hard-coding controller-specific
helpers.
Add pci_epc_get_aux_resources() and the corresponding pci_epc_ops
get_aux_resources() callback. The API returns a list of resources
described by type, physical address and size, plus type-specific
metadata.
Passing resources == NULL (or num_resources == 0) returns the required
number of entries.
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Signed-off-by: Koichiro Den <den@valinux.co.jp>
---
drivers/pci/endpoint/pci-epc-core.c | 41 +++++++++++++++++++++++
include/linux/pci-epc.h | 52 +++++++++++++++++++++++++++++
2 files changed, 93 insertions(+)
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 5045e22367cf..10bd392c4667 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -157,6 +157,47 @@ const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);
+/**
+ * pci_epc_get_aux_resources() - query EPC-provided auxiliary resources
+ * @epc: EPC device
+ * @func_no: function number
+ * @vfunc_no: virtual function number
+ * @resources: output array (may be NULL to query required count)
+ * @num_resources: size of @resources array in entries (0 when querying count)
+ *
+ * Some EPC backends integrate auxiliary blocks (e.g. DMA engines) whose control
+ * registers and/or descriptor memories can be exposed to the host by mapping
+ * them into BAR space. This helper queries the backend for such resources.
+ *
+ * Return:
+ * * >= 0: number of resources returned (or required, if @resources is NULL)
+ * * -EOPNOTSUPP: backend does not support auxiliary resource queries
+ * * other -errno on failure
+ */
+int pci_epc_get_aux_resources(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epc_aux_resource *resources,
+ int num_resources)
+{
+ int ret;
+
+ if (!epc || !epc->ops)
+ return -EINVAL;
+
+ if (func_no >= epc->max_functions)
+ return -EINVAL;
+
+ if (!epc->ops->get_aux_resources)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&epc->lock);
+ ret = epc->ops->get_aux_resources(epc, func_no, vfunc_no, resources,
+ num_resources);
+ mutex_unlock(&epc->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_epc_get_aux_resources);
+
/**
* pci_epc_stop() - stop the PCI link
* @epc: the link of the EPC device that has to be stopped
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 8687b9c3462b..705026f64ef1 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -61,6 +61,51 @@ struct pci_epc_map {
void __iomem *virt_addr;
};
+/**
+ * enum pci_epc_aux_resource_type - auxiliary resource type identifiers
+ * @PCI_EPC_AUX_DMA_CTRL_MMIO: Integrated DMA controller register window (MMIO)
+ * @PCI_EPC_AUX_DMA_CHAN_DESC: Per-channel DMA descriptor
+ * @PCI_EPC_AUX_DOORBELL_MMIO: Doorbell MMIO, that might be outside the DMA
+ * controller register window
+ *
+ * EPC backends may expose auxiliary blocks (e.g. DMA engines) by mapping their
+ * register windows and descriptor memories into BAR space. This enum
+ * identifies the type of each exposable resource.
+ */
+enum pci_epc_aux_resource_type {
+ PCI_EPC_AUX_DMA_CTRL_MMIO,
+ PCI_EPC_AUX_DMA_CHAN_DESC,
+ PCI_EPC_AUX_DOORBELL_MMIO,
+};
+
+/**
+ * struct pci_epc_aux_resource - a physical auxiliary resource that may be
+ * exposed for peer use
+ * @type: resource type, see enum pci_epc_aux_resource_type
+ * @phys_addr: physical base address of the resource
+ * @size: size of the resource in bytes
+ * @bar: BAR number where this resource is already exposed to the RC
+ * (NO_BAR if not)
+ * @bar_offset: offset within @bar where the resource starts (valid iff
+ * @bar != NO_BAR)
+ * @u: type-specific metadata
+ */
+struct pci_epc_aux_resource {
+ enum pci_epc_aux_resource_type type;
+ phys_addr_t phys_addr;
+ resource_size_t size;
+ enum pci_barno bar;
+ resource_size_t bar_offset;
+
+ union {
+ /* PCI_EPC_AUX_DOORBELL_MMIO */
+ struct {
+ int irq; /* IRQ number for the doorbell handler */
+ u32 data; /* write value to ring the doorbell */
+ } db_mmio;
+ } u;
+};
+
/**
* struct pci_epc_ops - set of function pointers for performing EPC operations
* @write_header: ops to populate configuration space header
@@ -84,6 +129,7 @@ struct pci_epc_map {
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
* @get_features: ops to get the features supported by the EPC
+ * @get_aux_resources: ops to retrieve controller-owned auxiliary resources
* @owner: the module owner containing the ops
*/
struct pci_epc_ops {
@@ -115,6 +161,9 @@ struct pci_epc_ops {
void (*stop)(struct pci_epc *epc);
const struct pci_epc_features* (*get_features)(struct pci_epc *epc,
u8 func_no, u8 vfunc_no);
+ int (*get_aux_resources)(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epc_aux_resource *resources,
+ int num_resources);
struct module *owner;
};
@@ -348,6 +397,9 @@ int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
u8 func_no, u8 vfunc_no);
+int pci_epc_get_aux_resources(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epc_aux_resource *resources,
+ int num_resources);
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features);
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
--
2.51.0 | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Thu, 19 Feb 2026 17:13:12 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas's
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | Implement the EPC aux-resource API for DesignWare endpoint controllers
with integrated eDMA.
Report:
- DMA controller MMIO window (PCI_EPC_AUX_DMA_CTRL_MMIO)
- interrupt-emulation doorbell register (PCI_EPC_AUX_DOORBELL_MMIO),
including its Linux IRQ and the data value to write to trigger the
interrupt
- per-channel LL descriptor regions (PCI_EPC_AUX_DMA_CHAN_DESC)
If the DMA controller MMIO window is already exposed via a
platform-owned fixed BAR subregion, also provide the BAR number and
offset so EPF drivers can reuse it without reprogramming the BAR.
Signed-off-by: Koichiro Den <den@valinux.co.jp>
---
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
1 file changed, 151 insertions(+)
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
index 5e47517c757c..2408ce95c103 100644
--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
+++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
@@ -9,6 +9,7 @@
#include <linux/align.h>
#include <linux/bitfield.h>
#include <linux/of.h>
+#include <linux/overflow.h>
#include <linux/platform_device.h>
#include "pcie-designware.h"
@@ -808,6 +809,155 @@ dw_pcie_ep_get_features(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
return ep->ops->get_features(ep);
}
+static const struct pci_epc_bar_rsvd_region *
+dw_pcie_ep_find_bar_rsvd_region(struct dw_pcie_ep *ep,
+ enum pci_epc_bar_rsvd_region_type type,
+ enum pci_barno *bar,
+ resource_size_t *bar_offset)
+{
+ const struct pci_epc_features *features;
+ const struct pci_epc_bar_desc *bar_desc;
+ const struct pci_epc_bar_rsvd_region *r;
+ int i, j;
+
+ if (!ep->ops->get_features)
+ return NULL;
+
+ features = ep->ops->get_features(ep);
+ if (!features)
+ return NULL;
+
+ for (i = BAR_0; i <= BAR_5; i++) {
+ bar_desc = &features->bar[i];
+
+ if (!bar_desc->nr_rsvd_regions || !bar_desc->rsvd_regions)
+ continue;
+
+ for (j = 0; j < bar_desc->nr_rsvd_regions; j++) {
+ r = &bar_desc->rsvd_regions[j];
+
+ if (r->type != type)
+ continue;
+
+ if (bar)
+ *bar = i;
+ if (bar_offset)
+ *bar_offset = r->offset;
+ return r;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+dw_pcie_ep_get_aux_resources(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+ struct pci_epc_aux_resource *resources,
+ int num_resources)
+{
+ struct dw_pcie_ep *ep = epc_get_drvdata(epc);
+ struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
+ const struct pci_epc_bar_rsvd_region *rsvd;
+ struct dw_edma_chip *edma = &pci->edma;
+ enum pci_barno dma_ctrl_bar = NO_BAR;
+ int ll_cnt = 0, needed, idx = 0;
+ resource_size_t db_offset = edma->db_offset;
+ resource_size_t dma_ctrl_bar_offset = 0;
+ resource_size_t dma_reg_size;
+ unsigned int i;
+
+ if (!pci->edma_reg_size)
+ return 0;
+
+ dma_reg_size = pci->edma_reg_size;
+
+ for (i = 0; i < edma->ll_wr_cnt; i++)
+ if (edma->ll_region_wr[i].sz)
+ ll_cnt++;
+
+ for (i = 0; i < edma->ll_rd_cnt; i++)
+ if (edma->ll_region_rd[i].sz)
+ ll_cnt++;
+
+ needed = 1 + ll_cnt + (db_offset != ~0 ? 1 : 0);
+
+ /* Count query mode */
+ if (!resources || !num_resources)
+ return needed;
+
+ if (num_resources < needed)
+ return -ENOSPC;
+
+ rsvd = dw_pcie_ep_find_bar_rsvd_region(ep,
+ PCI_EPC_BAR_RSVD_DMA_CTRL_MMIO,
+ &dma_ctrl_bar,
+ &dma_ctrl_bar_offset);
+ if (rsvd && rsvd->size < dma_reg_size)
+ dma_reg_size = rsvd->size;
+
+ /* DMA register block */
+ resources[idx++] = (struct pci_epc_aux_resource) {
+ .type = PCI_EPC_AUX_DMA_CTRL_MMIO,
+ .phys_addr = pci->edma_reg_phys,
+ .size = dma_reg_size,
+ .bar = dma_ctrl_bar,
+ .bar_offset = dma_ctrl_bar_offset,
+ };
+
+ /*
+ * For interrupt-emulation doorbells, report a standalone resource
+ * instead of bundling it into the DMA controller MMIO resource.
+ */
+ if (db_offset != ~0) {
+ if (range_end_overflows_t(resource_size_t, db_offset,
+ sizeof(u32), dma_reg_size))
+ return -EINVAL;
+
+ resources[idx++] = (struct pci_epc_aux_resource) {
+ .type = PCI_EPC_AUX_DOORBELL_MMIO,
+ .phys_addr = pci->edma_reg_phys + db_offset,
+ .size = sizeof(u32),
+ .bar = dma_ctrl_bar,
+ .bar_offset = dma_ctrl_bar != NO_BAR ?
+ dma_ctrl_bar_offset + db_offset : 0,
+ .u.db_mmio = {
+ .irq = edma->db_irq,
+ .data = 0, /* write 0 to assert */
+ },
+ };
+ }
+
+ /* One LL region per write channel */
+ for (i = 0; i < edma->ll_wr_cnt; i++) {
+ if (!edma->ll_region_wr[i].sz)
+ continue;
+
+ resources[idx++] = (struct pci_epc_aux_resource) {
+ .type = PCI_EPC_AUX_DMA_CHAN_DESC,
+ .phys_addr = edma->ll_region_wr[i].paddr,
+ .size = edma->ll_region_wr[i].sz,
+ .bar = NO_BAR,
+ .bar_offset = 0,
+ };
+ }
+
+ /* One LL region per read channel */
+ for (i = 0; i < edma->ll_rd_cnt; i++) {
+ if (!edma->ll_region_rd[i].sz)
+ continue;
+
+ resources[idx++] = (struct pci_epc_aux_resource) {
+ .type = PCI_EPC_AUX_DMA_CHAN_DESC,
+ .phys_addr = edma->ll_region_rd[i].paddr,
+ .size = edma->ll_region_rd[i].sz,
+ .bar = NO_BAR,
+ .bar_offset = 0,
+ };
+ }
+
+ return idx;
+}
+
static const struct pci_epc_ops epc_ops = {
.write_header = dw_pcie_ep_write_header,
.set_bar = dw_pcie_ep_set_bar,
@@ -823,6 +973,7 @@ static const struct pci_epc_ops epc_ops = {
.start = dw_pcie_ep_start,
.stop = dw_pcie_ep_stop,
.get_features = dw_pcie_ep_get_features,
+ .get_aux_resources = dw_pcie_ep_get_aux_resources,
};
/**
--
2.51.0 | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Thu, 19 Feb 2026 17:13:14 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas's
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | Some DesignWare PCIe controllers integrate an eDMA block whose registers
are located in a dedicated register window. Endpoint function drivers
may need the physical base and size of this window to map/expose it to a
peer.
Record the physical base and size of the integrated eDMA register window
in struct dw_pcie.
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Signed-off-by: Koichiro Den <den@valinux.co.jp>
---
drivers/pci/controller/dwc/pcie-designware.c | 4 ++++
drivers/pci/controller/dwc/pcie-designware.h | 2 ++
2 files changed, 6 insertions(+)
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 5741c09dde7f..f82ed189f6ae 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -162,8 +162,12 @@ int dw_pcie_get_resources(struct dw_pcie *pci)
pci->edma.reg_base = devm_ioremap_resource(pci->dev, res);
if (IS_ERR(pci->edma.reg_base))
return PTR_ERR(pci->edma.reg_base);
+ pci->edma_reg_phys = res->start;
+ pci->edma_reg_size = resource_size(res);
} else if (pci->atu_size >= 2 * DEFAULT_DBI_DMA_OFFSET) {
pci->edma.reg_base = pci->atu_base + DEFAULT_DBI_DMA_OFFSET;
+ pci->edma_reg_phys = pci->atu_phys_addr + DEFAULT_DBI_DMA_OFFSET;
+ pci->edma_reg_size = pci->atu_size - DEFAULT_DBI_DMA_OFFSET;
}
}
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index ae6389dd9caa..52f26663e8b1 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -541,6 +541,8 @@ struct dw_pcie {
int max_link_speed;
u8 n_fts[2];
struct dw_edma_chip edma;
+ phys_addr_t edma_reg_phys;
+ resource_size_t edma_reg_size;
bool l1ss_support; /* L1 PM Substates support */
struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
--
2.51.0 | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Thu, 19 Feb 2026 17:13:13 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas's
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | Prepare pci-ep-msi for non-MSI doorbell backends.
Factor MSI doorbell allocation into a helper and extend struct
pci_epf_doorbell_msg with:
- irq_flags: required IRQ request flags (e.g. IRQF_SHARED for some
backends)
- type: doorbell backend type
- bar/offset: pre-exposed doorbell target location, if any
Initialize these fields for the existing MSI-backed doorbell
implementation.
Also add PCI_EPF_DOORBELL_EMBEDDED type, which is to be implemented in a
follow-up patch.
No functional changes.
Reviewed-by: Frank Li <Frank.Li@nxp.com>
Signed-off-by: Koichiro Den <den@valinux.co.jp>
---
drivers/pci/endpoint/pci-ep-msi.c | 54 ++++++++++++++++++++++---------
include/linux/pci-epf.h | 23 +++++++++++--
2 files changed, 60 insertions(+), 17 deletions(-)
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
index ad8a81d6ad77..50badffa9d72 100644
--- a/drivers/pci/endpoint/pci-ep-msi.c
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/export.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/msi.h>
@@ -35,23 +36,13 @@ static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
pci_epc_put(epc);
}
-int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
+static int pci_epf_alloc_doorbell_msi(struct pci_epf *epf, u16 num_db)
{
- struct pci_epc *epc = epf->epc;
+ struct pci_epf_doorbell_msg *msg;
struct device *dev = &epf->dev;
+ struct pci_epc *epc = epf->epc;
struct irq_domain *domain;
- void *msg;
- int ret;
- int i;
-
- /* TODO: Multi-EPF support */
- if (list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list) != epf) {
- dev_err(dev, "MSI doorbell doesn't support multiple EPF\n");
- return -EINVAL;
- }
-
- if (epf->db_msg)
- return -EBUSY;
+ int ret, i;
domain = of_msi_map_get_device_domain(epc->dev.parent, 0,
DOMAIN_BUS_PLATFORM_MSI);
@@ -74,6 +65,12 @@ int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
if (!msg)
return -ENOMEM;
+ for (i = 0; i < num_db; i++)
+ msg[i] = (struct pci_epf_doorbell_msg) {
+ .type = PCI_EPF_DOORBELL_MSI,
+ .bar = NO_BAR,
+ };
+
epf->num_db = num_db;
epf->db_msg = msg;
@@ -90,13 +87,40 @@ int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
for (i = 0; i < num_db; i++)
epf->db_msg[i].virq = msi_get_virq(epc->dev.parent, i);
+ return 0;
+}
+
+int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
+{
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int ret;
+
+ /* TODO: Multi-EPF support */
+ if (list_first_entry_or_null(&epc->pci_epf, struct pci_epf, list) != epf) {
+ dev_err(dev, "Doorbell doesn't support multiple EPF\n");
+ return -EINVAL;
+ }
+
+ if (epf->db_msg)
+ return -EBUSY;
+
+ ret = pci_epf_alloc_doorbell_msi(epf, num_db);
+ if (!ret)
+ return 0;
+
+ dev_err(dev, "Failed to allocate doorbell: %d\n", ret);
return ret;
}
EXPORT_SYMBOL_GPL(pci_epf_alloc_doorbell);
void pci_epf_free_doorbell(struct pci_epf *epf)
{
- platform_device_msi_free_irqs_all(epf->epc->dev.parent);
+ if (!epf->db_msg)
+ return;
+
+ if (epf->db_msg[0].type == PCI_EPF_DOORBELL_MSI)
+ platform_device_msi_free_irqs_all(epf->epc->dev.parent);
kfree(epf->db_msg);
epf->db_msg = NULL;
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 7737a7c03260..cd747447a1ea 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -152,14 +152,33 @@ struct pci_epf_bar {
struct pci_epf_bar_submap *submap;
};
+enum pci_epf_doorbell_type {
+ PCI_EPF_DOORBELL_MSI = 0,
+ PCI_EPF_DOORBELL_EMBEDDED,
+};
+
/**
* struct pci_epf_doorbell_msg - represents doorbell message
- * @msg: MSI message
- * @virq: IRQ number of this doorbell MSI message
+ * @msg: Doorbell address/data pair to be mapped into BAR space.
+ * For MSI-backed doorbells this is the MSI message, while for
+ * "embedded" doorbells this represents an MMIO write that asserts
+ * an interrupt on the EP side.
+ * @virq: IRQ number of this doorbell message
+ * @irq_flags: Required flags for request_irq()/request_threaded_irq().
+ * Callers may OR-in additional flags (e.g. IRQF_ONESHOT).
+ * @type: Doorbell type.
+ * @bar: BAR number where the doorbell target is already exposed to the RC
+ * (NO_BAR if not)
+ * @offset: offset within @bar for the doorbell target (valid iff
+ * @bar != NO_BAR)
*/
struct pci_epf_doorbell_msg {
struct msi_msg msg;
int virq;
+ unsigned long irq_flags;
+ enum pci_epf_doorbell_type type;
+ enum pci_barno bar;
+ resource_size_t offset;
};
/**
--
2.51.0 | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Thu, 19 Feb 2026 17:13:15 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas's
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | Some endpoint platforms cannot use platform MSI / GIC ITS to implement
EP-side doorbells. In those cases, EPF drivers cannot provide an
interrupt-driven doorbell and often fall back to polling.
Add an "embedded" doorbell backend that uses a controller-integrated
doorbell target (e.g. DesignWare integrated eDMA interrupt-emulation
doorbell).
The backend locates the doorbell register and a corresponding Linux IRQ
via the EPC aux-resource API. If the doorbell register is already
exposed via a fixed BAR mapping, provide BAR+offset. Otherwise provide
the physical address so EPF drivers can map it into BAR space.
When MSI doorbell allocation fails with -ENODEV,
pci_epf_alloc_doorbell() falls back to this embedded backend.
Signed-off-by: Koichiro Den <den@valinux.co.jp>
---
Changes since v8:
- Add MMIO address alignment check
- Drop 'eDMA' word from the subject
drivers/pci/endpoint/pci-ep-msi.c | 99 ++++++++++++++++++++++++++++++-
1 file changed, 97 insertions(+), 2 deletions(-)
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
index 50badffa9d72..f287fbf684ca 100644
--- a/drivers/pci/endpoint/pci-ep-msi.c
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -6,6 +6,8 @@
* Author: Frank Li <Frank.Li@nxp.com>
*/
+#include <linux/align.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
@@ -36,6 +38,86 @@ static void pci_epf_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
pci_epc_put(epc);
}
+static int pci_epf_alloc_doorbell_embedded(struct pci_epf *epf, u16 num_db)
+{
+ const struct pci_epc_aux_resource *doorbell = NULL;
+ struct pci_epf_doorbell_msg *msg;
+ struct pci_epc *epc = epf->epc;
+ struct device *dev = &epf->dev;
+ int count, ret, i;
+ u64 addr;
+
+ count = pci_epc_get_aux_resources(epc, epf->func_no, epf->vfunc_no,
+ NULL, 0);
+ if (count == -EOPNOTSUPP || count == 0)
+ return -ENODEV;
+ if (count < 0)
+ return count;
+
+ struct pci_epc_aux_resource *res __free(kfree) =
+ kcalloc(count, sizeof(*res), GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ ret = pci_epc_get_aux_resources(epc, epf->func_no, epf->vfunc_no,
+ res, count);
+ if (ret == -EOPNOTSUPP || ret == 0)
+ return -ENODEV;
+ if (ret < 0)
+ return ret;
+
+ count = ret;
+
+ for (i = 0; i < count; i++) {
+ if (res[i].type == PCI_EPC_AUX_DOORBELL_MMIO) {
+ if (doorbell) {
+ dev_warn(dev,
+ "Duplicate DOORBELL_MMIO resource found\n");
+ continue;
+ }
+ doorbell = &res[i];
+ }
+ }
+ if (!doorbell)
+ return -ENODEV;
+
+ addr = doorbell->phys_addr;
+ if (!IS_ALIGNED(addr, sizeof(u32)))
+ return -EINVAL;
+
+ msg = kcalloc(num_db, sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ /*
+ * Embedded doorbell backends (e.g. DesignWare eDMA interrupt emulation)
+ * typically provide a single IRQ and do not offer per-doorbell
+ * distinguishable address/data pairs. The EPC aux resource therefore
+ * exposes one DOORBELL_MMIO entry (u.db_mmio.irq).
+ *
+ * Still, pci_epf_alloc_doorbell() allows requesting multiple doorbells.
+ * For such backends we replicate the same address/data for each entry
+ * and mark the IRQ as shared (IRQF_SHARED). Consumers must treat them
+ * as equivalent "kick" doorbells.
+ */
+ for (i = 0; i < num_db; i++)
+ msg[i] = (struct pci_epf_doorbell_msg) {
+ .msg.address_lo = (u32)addr,
+ .msg.address_hi = (u32)(addr >> 32),
+ .msg.data = doorbell->u.db_mmio.data,
+ .virq = doorbell->u.db_mmio.irq,
+ .irq_flags = IRQF_SHARED,
+ .type = PCI_EPF_DOORBELL_EMBEDDED,
+ .bar = doorbell->bar,
+ .offset = (doorbell->bar == NO_BAR) ? 0 :
+ doorbell->bar_offset,
+ };
+
+ epf->num_db = num_db;
+ epf->db_msg = msg;
+ return 0;
+}
+
static int pci_epf_alloc_doorbell_msi(struct pci_epf *epf, u16 num_db)
{
struct pci_epf_doorbell_msg *msg;
@@ -109,8 +191,21 @@ int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 num_db)
if (!ret)
return 0;
- dev_err(dev, "Failed to allocate doorbell: %d\n", ret);
- return ret;
+ /*
+ * Fall back to embedded doorbell only when platform MSI is unavailable
+ * for this EPC.
+ */
+ if (ret != -ENODEV)
+ return ret;
+
+ ret = pci_epf_alloc_doorbell_embedded(epf, num_db);
+ if (ret) {
+ dev_err(dev, "Failed to allocate doorbell: %d\n", ret);
+ return ret;
+ }
+
+ dev_info(dev, "Using embedded (DMA) doorbell fallback\n");
+ return 0;
}
EXPORT_SYMBOL_GPL(pci_epf_alloc_doorbell);
--
2.51.0 | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Thu, 19 Feb 2026 17:13:18 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from the v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas'
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas'
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | Support doorbell backends where the doorbell target is already exposed
via a platform-owned fixed BAR mapping and/or where the doorbell IRQ
must be requested with specific flags.
When pci_epf_alloc_doorbell() provides db_msg[].bar/offset, reuse the
pre-exposed BAR window and skip programming a new inbound mapping. Also
honor db_msg[].irq_flags when requesting the doorbell IRQ.
Multiple doorbells may share the same Linux IRQ. Avoid duplicate
request_irq() calls by requesting each unique virq once.
Make pci-epf-vntb work with platform-defined or embedded doorbell
backends without exposing backend-specific details to the consumer
layer.
Signed-off-by: Koichiro Den <den@valinux.co.jp>
---
Changes since v8:
- Reword the last paragraph into imperative mood
- Rename s/epf_ntb_db_irq_is_first/epf_ntb_db_irq_is_duplicated/ and
invert the returned bool value
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++++++++++++++-
1 file changed, 58 insertions(+), 3 deletions(-)
diff --git a/drivers/pci/endpoint/functions/pci-epf-vntb.c b/drivers/pci/endpoint/functions/pci-epf-vntb.c
index 52cf442ca1d9..7a27e9343394 100644
--- a/drivers/pci/endpoint/functions/pci-epf-vntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-vntb.c
@@ -134,6 +134,11 @@ struct epf_ntb {
u16 vntb_vid;
bool linkup;
+
+ /*
+ * True when doorbells are interrupt-driven (MSI or embedded), false
+ * when polled.
+ */
bool msi_doorbell;
u32 spad_size;
@@ -517,6 +522,17 @@ static int epf_ntb_configure_interrupt(struct epf_ntb *ntb)
return 0;
}
+static bool epf_ntb_db_irq_is_duplicated(const struct pci_epf *epf, unsigned int idx)
+{
+ unsigned int i;
+
+ for (i = 0; i < idx; i++)
+ if (epf->db_msg[i].virq == epf->db_msg[idx].virq)
+ return true;
+
+ return false;
+}
+
static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
struct pci_epf_bar *db_bar,
const struct pci_epc_features *epc_features,
@@ -533,9 +549,24 @@ static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
if (ret)
return ret;
+ /*
+ * The doorbell target may already be exposed by a platform-owned fixed
+ * BAR. In that case, we must reuse it and the requested db_bar must
+ * match.
+ */
+ if (epf->db_msg[0].bar != NO_BAR && epf->db_msg[0].bar != barno) {
+ ret = -EINVAL;
+ goto err_free_doorbell;
+ }
+
for (req = 0; req < ntb->db_count; req++) {
+ /* Avoid requesting duplicate handlers */
+ if (epf_ntb_db_irq_is_duplicated(epf, req))
+ continue;
+
ret = request_irq(epf->db_msg[req].virq, epf_ntb_doorbell_handler,
- 0, "pci_epf_vntb_db", ntb);
+ epf->db_msg[req].irq_flags, "pci_epf_vntb_db",
+ ntb);
if (ret) {
dev_err(&epf->dev,
@@ -545,6 +576,22 @@ static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
}
}
+ if (epf->db_msg[0].bar != NO_BAR) {
+ for (i = 0; i < ntb->db_count; i++) {
+ msg = &epf->db_msg[i].msg;
+
+ if (epf->db_msg[i].bar != barno) {
+ ret = -EINVAL;
+ goto err_free_irq;
+ }
+
+ ntb->reg->db_data[i] = msg->data;
+ ntb->reg->db_offset[i] = epf->db_msg[i].offset;
+ }
+ goto out;
+ }
+
+ /* Program inbound mapping for the doorbell */
msg = &epf->db_msg[0].msg;
high = 0;
@@ -591,6 +638,7 @@ static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
ntb->reg->db_offset[i] = offset;
}
+out:
ntb->reg->db_entry_size = 0;
ntb->msi_doorbell = true;
@@ -598,9 +646,13 @@ static int epf_ntb_db_bar_init_msi_doorbell(struct epf_ntb *ntb,
return 0;
err_free_irq:
- for (req--; req >= 0; req--)
+ for (req--; req >= 0; req--) {
+ if (epf_ntb_db_irq_is_duplicated(epf, req))
+ continue;
free_irq(epf->db_msg[req].virq, ntb);
+ }
+err_free_doorbell:
pci_epf_free_doorbell(ntb->epf);
return ret;
}
@@ -666,8 +718,11 @@ static void epf_ntb_db_bar_clear(struct epf_ntb *ntb)
if (ntb->msi_doorbell) {
int i;
- for (i = 0; i < ntb->db_count; i++)
+ for (i = 0; i < ntb->db_count; i++) {
+ if (epf_ntb_db_irq_is_duplicated(ntb->epf, i))
+ continue;
free_irq(ntb->epf->db_msg[i].virq, ntb);
+ }
}
if (ntb->epf->db_msg)
--
2.51.0 | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Thu, 19 Feb 2026 17:13:16 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from the v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas'
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas'
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | pci-epf-test advertises the doorbell target to the RC as a BAR number
and an offset, and the RC rings the doorbell with a single DWORD MMIO
write.
Some doorbell backends may report that the doorbell target is already
exposed via a platform-owned fixed BAR (db_msg[0].bar/offset). In that
case, reuse the pre-exposed window and do not reprogram the BAR with
pci_epc_set_bar().
Also honor db_msg[0].irq_flags when requesting the doorbell IRQ, and
only restore the original BAR mapping on disable if pci-epf-test
programmed it.
Signed-off-by: Koichiro Den <den@valinux.co.jp>
---
Changes since v8:
- Drop the extra size_add() doorbell-offset check, which is generally
unneeded when pci_epf_align_inbound_addr() runs. This fixes
BAR_RESERVED cases where epf->bar[bar].size can be 0.
drivers/pci/endpoint/functions/pci-epf-test.c | 84 +++++++++++++------
1 file changed, 57 insertions(+), 27 deletions(-)
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 684f018ea242..4fc53edbceed 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -92,6 +92,7 @@ struct pci_epf_test {
bool dma_private;
const struct pci_epc_features *epc_features;
struct pci_epf_bar db_bar;
+ bool db_bar_programmed;
size_t bar_size[PCI_STD_NUM_BARS];
};
@@ -731,7 +732,9 @@ static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
{
u32 status = le32_to_cpu(reg->status);
struct pci_epf *epf = epf_test->epf;
+ struct pci_epf_doorbell_msg *db;
struct pci_epc *epc = epf->epc;
+ unsigned long irq_flags;
struct msi_msg *msg;
enum pci_barno bar;
size_t offset;
@@ -741,13 +744,28 @@ static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
if (ret)
goto set_status_err;
- msg = &epf->db_msg[0].msg;
- bar = pci_epc_get_next_free_bar(epf_test->epc_features, epf_test->test_reg_bar + 1);
- if (bar < BAR_0)
- goto err_doorbell_cleanup;
+ db = &epf->db_msg[0];
+ msg = &db->msg;
+ epf_test->db_bar_programmed = false;
+
+ if (db->bar != NO_BAR) {
+ /*
+ * The doorbell target is already exposed via a platform-owned
+ * fixed BAR
+ */
+ bar = db->bar;
+ offset = db->offset;
+ } else {
+ bar = pci_epc_get_next_free_bar(epf_test->epc_features,
+ epf_test->test_reg_bar + 1);
+ if (bar < BAR_0)
+ goto err_doorbell_cleanup;
+ }
+
+ irq_flags = epf->db_msg[0].irq_flags | IRQF_ONESHOT;
ret = request_threaded_irq(epf->db_msg[0].virq, NULL,
- pci_epf_test_doorbell_handler, IRQF_ONESHOT,
+ pci_epf_test_doorbell_handler, irq_flags,
"pci-ep-test-doorbell", epf_test);
if (ret) {
dev_err(&epf->dev,
@@ -759,22 +777,30 @@ static void pci_epf_test_enable_doorbell(struct pci_epf_test *epf_test,
reg->doorbell_data = cpu_to_le32(msg->data);
reg->doorbell_bar = cpu_to_le32(bar);
- msg = &epf->db_msg[0].msg;
- ret = pci_epf_align_inbound_addr(epf, bar, ((u64)msg->address_hi << 32) | msg->address_lo,
- &epf_test->db_bar.phys_addr, &offset);
+ if (db->bar == NO_BAR) {
+ ret = pci_epf_align_inbound_addr(epf, bar,
+ ((u64)msg->address_hi << 32) |
+ msg->address_lo,
+ &epf_test->db_bar.phys_addr,
+ &offset);
- if (ret)
- goto err_free_irq;
+ if (ret)
+ goto err_free_irq;
+ }
reg->doorbell_offset = cpu_to_le32(offset);
- epf_test->db_bar.barno = bar;
- epf_test->db_bar.size = epf->bar[bar].size;
- epf_test->db_bar.flags = epf->bar[bar].flags;
+ if (db->bar == NO_BAR) {
+ epf_test->db_bar.barno = bar;
+ epf_test->db_bar.size = epf->bar[bar].size;
+ epf_test->db_bar.flags = epf->bar[bar].flags;
- ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
- if (ret)
- goto err_free_irq;
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf_test->db_bar);
+ if (ret)
+ goto err_free_irq;
+
+ epf_test->db_bar_programmed = true;
+ }
status |= STATUS_DOORBELL_ENABLE_SUCCESS;
reg->status = cpu_to_le32(status);
@@ -804,17 +830,21 @@ static void pci_epf_test_disable_doorbell(struct pci_epf_test *epf_test,
free_irq(epf->db_msg[0].virq, epf_test);
pci_epf_test_doorbell_cleanup(epf_test);
- /*
- * The doorbell feature temporarily overrides the inbound translation
- * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
- * it calls set_bar() twice without ever calling clear_bar(), as
- * calling clear_bar() would clear the BAR's PCI address assigned by
- * the host. Thus, when disabling the doorbell, restore the inbound
- * translation to point to the memory allocated for the BAR.
- */
- ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
- if (ret)
- goto set_status_err;
+ if (epf_test->db_bar_programmed) {
+ /*
+ * The doorbell feature temporarily overrides the inbound translation
+ * to point to the address stored in epf_test->db_bar.phys_addr, i.e.,
+ * it calls set_bar() twice without ever calling clear_bar(), as
+ * calling clear_bar() would clear the BAR's PCI address assigned by
+ * the host. Thus, when disabling the doorbell, restore the inbound
+ * translation to point to the memory allocated for the BAR.
+ */
+ ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, &epf->bar[bar]);
+ if (ret)
+ goto set_status_err;
+
+ epf_test->db_bar_programmed = false;
+ }
status |= STATUS_DOORBELL_DISABLE_SUCCESS;
reg->status = cpu_to_le32(status);
--
2.51.0 | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Thu, 19 Feb 2026 17:13:17 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas'
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | On 2/19/2026 1:43 PM, Koichiro Den wrote:
The return value of pci_epc_get_features() seems to be used here
without checking for NULL.
Since this function can return NULL, and other EPF drivers
(pci-epf-test.c, pci-epf-ntb.c) handle that case,
is VNTB assuming that epc_features is always non-NULL,
or should a defensive NULL check be added for pci_epc_get_features()?
Thanks,
Alok | {
"author": "ALOK TIWARI <alok.a.tiwari@oracle.com>",
"date": "Thu, 19 Feb 2026 22:00:19 +0530",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas'
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | On Thu, Feb 19, 2026 at 10:00:19PM +0530, ALOK TIWARI wrote:
Thanks for the comment, good catch.
AFAICT, this is a pre-existing issue (at least since the initial vNTB merge,
commit e35f56bb0330), and the same pattern can be found in a few other paths in
epf-vntb, such as:
- epf_ntb_config_spad_bar_alloc()
- epf_ntb_configure_interrupt()
- epf_ntb_db_bar_init() (the one you pointed out)
a .get_features callback and return a non-NULL pointer, and the same holds for
the in-tree dw_pcie_ep_ops implementations. So in practice this does not appear
to be triggering a NULL-dereference issue today.
That said, pci_epc_get_features() is documented to return NULL on failure, so
adding defensive checks would certainly improve robustness and align vNTB with
other EPF drivers.
Since this is independent of the doorbell rework in this series, I think it
would probably be cleaner to address it in a separate patch.
If you are planning to send such a patch, I would be happy to test and/or review
it. Otherwise, I can prepare a small follow-up patch (with a Reported-by tag)
when I have a spare cycle. Given that this is pre-existing and does not seem to
cause observable issues today, I do not think it requires a Fixes: tag or stable
backporting.
Best regards,
Koichiro | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Fri, 20 Feb 2026 12:35:31 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas'
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | On Fri, Feb 20, 2026 at 12:35:31PM +0900, Koichiro Den wrote:
We should really clean this up somehow.
The problems are:
1) A long time ago, not all EPC driver had a get_features callback.
Now, EPC drivers do have such a callback.
Ideally, we should probably add a check that an EPC driver implements
epc->ops_get_features in __pci_epc_create(), and return failure if it
doesn't.
This way we can remove the if (!epc->ops_get_features) check in e.g.
pci_epc_get_features().
2) DWC based glue drivers have their own get_features callback in
struct dw_pcie_ep
But here we should just have some check in dw_pcie_ep_init() that
returns failure if the glue driver has not implemented
(struct dw_pcie_ep *)->ops->get_features)
This way we can remove the
if (!ep->ops->get_features) checks in pcie-designware-ep.c.
3) Even if the get_features callback is implemented, EPF drivers call
pci_epc_get_features(), which has this code:
if (!pci_epc_function_is_valid(epc, func_no, vfunc_no))
return NULL;
So, it will return NULL for invalid func_no / vfunc_no.
I think this currently makes it quite hard to remove the NULL checks on the
return value from a epc->ops_get_features() call in the EPF drivers.
How pci-epf-test has managed to "work around" this silliness of having
features = pci_epc_get_features(epc, func_no, vfunc_no);
if (!features)
checks everywhere (problem 3): It calls pci_epc_get_features() once in .bind()
and if it fails, it fails bind(), if it returns non-NULL, it caches the result:
https://github.com/torvalds/linux/blob/v6.19/drivers/pci/endpoint/functions/pci-epf-test.c#L1112-L1123
That way, all other places in pci-epf-test.c does not need to NULL check
pci_epc_get_features(). (Instead it uses the cached value in struct pci_epf_test *)
pci-epf-vntb.c should probably do something similar to avoid sprinkling
NULL checks all over pci-epf-vntb.c.
Kind regards,
Niklas | {
"author": "Niklas Cassel <cassel@kernel.org>",
"date": "Fri, 20 Feb 2026 11:27:05 +0100",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas'
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas's
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable namings and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | On 2/20/2026 9:05 AM, Koichiro Den wrote:
Yes, agreed this is independent of the doorbell rework.
I will send a separate patch for this.
Thanks,
Alok | {
"author": "ALOK TIWARI <alok.a.tiwari@oracle.com>",
"date": "Fri, 20 Feb 2026 20:44:43 +0530",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
lkml_critique | lkml | Hi,
Some endpoint platforms cannot use a GIC ITS-backed MSI domain for
EP-side doorbells. In those cases, endpoint function (EPF) drivers
cannot provide a doorbell to the root complex (RC), and features such as
vNTB may fall back to polling with significantly higher latency.
This series adds an alternate doorbell backend based on the DesignWare
PCIe controller's integrated eDMA interrupt-emulation feature. The RC
rings the doorbell by doing a single 32-bit MMIO write to an eDMA
doorbell location exposed in a BAR window. The EP side receives a Linux
IRQ that EPF drivers can use as a doorbell interrupt, without relying on
MSI message writes reaching the ITS.
To support this, the series:
- Adds an EPC auxiliary resource query API so EPF drivers can discover
controller-integrated resources (DMA MMIO, doorbell MMIO, and DMA LL
memory).
- Updates DesignWare EP controllers to report integrated eDMA
resources via the new API.
- Updates dw-edma to provide a dedicated virtual IRQ for interrupt
emulation and to perform the core-specific deassert sequence.
- Updates pci-epf-test and pci-epf-vntb to reuse a pre-exposed
BAR/offset and to honor per-doorbell IRQ flags.
Many thanks to Frank and Niklas for their continued review and valuable
feedback throughout the development of this series. The Reviewed-by tags
for the last two patches are dropped due to the additional changes
following Niklas' review in the v8 threads. Since the diff is small, I'd
appreciate it if Frank could re-check them.
Dependencies
------------
The following three series are prerequisites for this series:
(1). [PATCH v2 0/4] PCI: endpoint: Doorbell-related fixes
https://lore.kernel.org/linux-pci/20260217063856.3759713-1-den@valinux.co.jp/
(2). [PATCH 0/2] dmaengine: dw-edma: Interrupt-emulation doorbell support
https://lore.kernel.org/dmaengine/20260215152216.3393561-1-den@valinux.co.jp/
(3). [PATCH 0/9] PCI: endpoint differentiate between disabled and reserved BARs
https://lore.kernel.org/linux-pci/20260217212707.2450423-11-cassel@kernel.org/
Regarding (3):
- [PATCH 2/9] and [PATCH 3/9] are strictly the prerequisites for this v9 series.
In fact, they are split out from the v8 series.
- With [PATCH 6/9], this v9 series should allow the embedded doorbell fallback
path to pass on RK3588 from the beginning. Given that, picking up the whole
(3) series earlier should be the most streamlined choice.
Tested on
---------
I re-tested the embedded (DMA) doorbell fallback path (via pci-epf-test)
on R-Car Spider boards (with this v9 series):
$ ./pci_endpoint_test -t DOORBELL_TEST
TAP version 13
1..1
# Starting 1 tests from 1 test cases.
# RUN pcie_ep_doorbell.DOORBELL_TEST ...
# OK pcie_ep_doorbell.DOORBELL_TEST
ok 1 pcie_ep_doorbell.DOORBELL_TEST
# PASSED: 1 / 1 tests passed.
# Totals: pass:1 fail:0 xfail:0 xpass:0 skip:0 error:0
with the following message observed on the EP side:
[ 82.043715] pci_epf_test pci_epf_test.0: Can't find MSI domain for EPC
[ 82.044382] pci_epf_test pci_epf_test.0: Using embedded (DMA) doorbell fallback
(Note: for the test to pass on R-Car Spider, one of the following was required:
- echo 1048576 > functions/pci_epf_test/func1/pci_epf_test.0/bar2_size
- apply https://lore.kernel.org/linux-pci/20260210160315.2272930-1-den@valinux.co.jp/)
Performance test: vNTB ping latency
-----------------------------------
Setup:
- configfs (R-Car Spider in EP mode):
cd /sys/kernel/config/pci_ep/
mkdir functions/pci_epf_vntb/func1
echo 0x1912 > functions/pci_epf_vntb/func1/vendorid
echo 0x0030 > functions/pci_epf_vntb/func1/deviceid
echo 32 > functions/pci_epf_vntb/func1/msi_interrupts
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_count
echo 128 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/spad_count
echo 1 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/num_mws
echo 0x100000 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1
echo 0x1912 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_vid
echo 0x0030 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vntb_pid
echo 0x10 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/vbus_number
echo 0 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/ctrl_bar
echo 4 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/db_bar [*]
echo 2 > functions/pci_epf_vntb/func1/pci_epf_vntb.0/mw1_bar
ln -s controllers/e65d0000.pcie-ep functions/pci_epf_vntb/func1/primary/
echo 1 > controllers/e65d0000.pcie-ep/start
[*]: On R-Car Spider, a hack is currently needed to use BAR4 for
the doorbell. I'll consider posting a patch for that
separately.
- ensure ntb_transport/ntb_netdev are loaded on both sides
Results:
- Without this series (pci.git main)
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=6.04 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=12.6 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=7.40 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=5.38 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=11.4 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=9.42 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=3.36 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=9.48 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=4.24 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=10.4 ms
- With this series (on top of pci.git main + Dependency (1), (2) and (3))
$ ping -c 10 10.0.0.11
PING 10.0.0.11 (10.0.0.11) 56(84) bytes of data.
64 bytes from 10.0.0.11: icmp_seq=1 ttl=64 time=0.845 ms
64 bytes from 10.0.0.11: icmp_seq=2 ttl=64 time=0.742 ms
64 bytes from 10.0.0.11: icmp_seq=3 ttl=64 time=0.868 ms
64 bytes from 10.0.0.11: icmp_seq=4 ttl=64 time=0.806 ms
64 bytes from 10.0.0.11: icmp_seq=5 ttl=64 time=0.951 ms
64 bytes from 10.0.0.11: icmp_seq=6 ttl=64 time=0.965 ms
64 bytes from 10.0.0.11: icmp_seq=7 ttl=64 time=0.871 ms
64 bytes from 10.0.0.11: icmp_seq=8 ttl=64 time=0.877 ms
64 bytes from 10.0.0.11: icmp_seq=9 ttl=64 time=0.938 ms
64 bytes from 10.0.0.11: icmp_seq=10 ttl=64 time=0.960 ms
---
Changelog
---------
* v8->v9 changes:
- Add a new dependency series (3), which moved the BAR reserved-subregion
framework + the RK3588 BAR4 example out of v8 (dropping the corresponding
patches from this series).
- pci-epf-vntb: rename the duplicate-IRQ helper and invert the return value,
per Frank's review.
- pci-epf-test: drop the extra size_add() doorbell-offset check, per Niklas'
review.
- pci-ep-msi: add a DWORD alignment check for DOORBELL_MMIO, per Niklas'
review.
- Carry over Reviewed-by tags for unchanged patches + drop Reviewed-by tags
where code changed.
- Rename the last patch subject (drop 'eDMA' word).
* v7->v8 changes:
- Deduplicate request_irq()/free_irq() calls based on virq (shared
IRQ) rather than doorbell type, as suggested during review of v7
Patch #7.
- Clean up the pci_epf_alloc_doorbell() error path, as suggested
during review of v7 Patch #9.
- Use range_end_overflows_t() instead of an open-coded overflow check,
following discussion during review of v7 Patch #5.
- Add a write-data field to the DOORBELL_MMIO aux-resource metadata
and plumb it through to the embedded doorbell backend (DesignWare
uses data=0).
* v6->v7 changes:
- Split out preparatory patches to keep the series below 10 patches.
- Add support for platforms where the eDMA register block is fixed
within a reserved BAR window (e.g. RK3588 BAR4) and must be reused
as-is.
- Introduce a dedicated virtual IRQ and irq_chip (using
handle_level_irq) for interrupt-emulation doorbells instead of
reusing per-channel IRQs. This avoids delivery via different IRQs on
platforms with chip->nr_irqs > 1.
* v5->v6 changes:
- Fix a double-free in v5 Patch 8/8 caused by mixing __free(kfree) with
an explicit kfree(). This is a functional bug (detectable by KASAN),
hence the respin solely for this fix. Sorry for the noise. No other
changes.
* v4->v5 changes:
- Change the series subject now that the series has evolved into a
consumer-driven set focused on the embedded doorbell fallback and its
in-tree users (epf-test and epf-vntb).
- Drop [PATCH v4 01/09] (dw-edma per-channel interrupt routing control)
from this series for now, so the series focuses on what's needed by the
current consumer (i.e. the doorbell fallback implementation).
- Replace the v4 embedded-doorbell "test variant + host/kselftest
plumbing" with a generic embedded-doorbell fallback in
pci_epf_alloc_doorbell(), including exposing required IRQ request flags
to EPF drivers.
- Two preparatory fix patches (Patch 6/8 and 7/8) to clean up error
handling and state management ahead of Patch 8/8.
- Rename *_get_remote_resource() to *_get_aux_resources() and adjust
relevant variable names and kernel docs. Discussion may continue.
- Rework dw-edma per-channel metadata exposure to cache the needed info
in dw_edma_chip (IRQ number + emulation doorbell offset) and consume it
from the DesignWare EPC auxiliary resource provider without calling back
to dw-edma.
* v3->v4 changes:
- Drop dma_slave_caps.hw_id and the dmaengine selfirq callback
registration API. Instead, add a dw-edma specific dw_edma_chan_info()
helper and extend the EPC remote resource metadata accordingly.
- Add explicit acking for eDMA interrupt emulation and adjust the
dw-edma IRQ path for embedded-doorbell usage.
- Replace the previous EPC API smoke test with an embedded doorbell
test variant (pci-epf-test + pci_endpoint_test/selftests).
- Rebase onto pci.git controller/dwc commit 43d324eeb08c.
* v2->v3 changes:
- Replace DWC-specific helpers with a generic EPC remote resource query API.
- Add pci-epf-test smoke test and host/kselftest support for the new API.
- Drop the dw-edma-specific notify-only channel and polling approach
([PATCH v2 4/7] and [PATCH v2 5/7]), and rework notification handling
around a generic dmaengine_(un)register_selfirq() API implemented
by dw-edma.
* v1->v2 changes:
- Combine the two previously posted series into a single set (per Frank's
suggestion). Order dmaengine/dw-edma patches first so hw_id support
lands before the PCI LL-region helper, which assumes
dma_slave_caps.hw_id availability.
v8: https://lore.kernel.org/linux-pci/20260217080601.3808847-1-den@valinux.co.jp/
v7: https://lore.kernel.org/linux-pci/20260215163847.3522572-1-den@valinux.co.jp/
v6: https://lore.kernel.org/all/20260209125316.2132589-1-den@valinux.co.jp/
v5: https://lore.kernel.org/all/20260209062952.2049053-1-den@valinux.co.jp/
v4: https://lore.kernel.org/all/20260206172646.1556847-1-den@valinux.co.jp/
v3: https://lore.kernel.org/all/20260204145440.950609-1-den@valinux.co.jp/
v2: https://lore.kernel.org/all/20260127033420.3460579-1-den@valinux.co.jp/
v1: https://lore.kernel.org/dmaengine/20260126073652.3293564-1-den@valinux.co.jp/
+
https://lore.kernel.org/linux-pci/20260126071550.3233631-1-den@valinux.co.jp/
Thanks for reviewing.
Koichiro Den (7):
PCI: endpoint: Add auxiliary resource query API
PCI: dwc: Record integrated eDMA register window
PCI: dwc: ep: Expose integrated eDMA resources via EPC aux-resource
API
PCI: endpoint: pci-ep-msi: Refactor doorbell allocation for new
backends
PCI: endpoint: pci-epf-vntb: Reuse pre-exposed doorbells and IRQ flags
PCI: endpoint: pci-epf-test: Reuse pre-exposed doorbell targets
PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback
.../pci/controller/dwc/pcie-designware-ep.c | 151 ++++++++++++++++++
drivers/pci/controller/dwc/pcie-designware.c | 4 +
drivers/pci/controller/dwc/pcie-designware.h | 2 +
drivers/pci/endpoint/functions/pci-epf-test.c | 84 ++++++----
drivers/pci/endpoint/functions/pci-epf-vntb.c | 61 ++++++-
drivers/pci/endpoint/pci-ep-msi.c | 149 +++++++++++++++--
drivers/pci/endpoint/pci-epc-core.c | 41 +++++
include/linux/pci-epc.h | 52 ++++++
include/linux/pci-epf.h | 23 ++-
9 files changed, 520 insertions(+), 47 deletions(-)
--
2.51.0
| null | null | null | [PATCH v9 0/7] PCI: endpoint: pci-ep-msi: Add embedded doorbell fallback | On Thu, Feb 19, 2026 at 05:13:18PM +0900, Koichiro Den wrote:
On second thought, I'm wondering whether it makes sense to handle the case where
the embedded doorbell target resides behind an IOMMU in this series.
In v9, we simply expose the raw physical address without establishing an IOMMU
mapping. When the EPC parent device is attached to an IOMMU domain, a Host->EP
MMIO write through the BAR window may result in an IOMMU fault.
Initially, I planned to submit IOMMU support separately as a follow-up series
once this series is accepted, to avoid making this series too large [1].
However, for consistency with the MSI doorbell case when CONFIG_IRQ_MSI_IOMMU=y,
it might be cleaner to handle the IOVA mapping as part of this series.
[1] Supporting such an IOMMU-backed case would likely require additional
patches for vNTB + ntb_transport to demonstrate usability, such as:
https://lore.kernel.org/all/20260118135440.1958279-12-den@valinux.co.jp/
https://lore.kernel.org/all/20260118135440.1958279-16-den@valinux.co.jp/
https://lore.kernel.org/all/20260118135440.1958279-19-den@valinux.co.jp/
Perhaps the cleanest option would be to submit these three as a prerequisite
series.
Conceptually, the change would look like the following (to be applied on top of
this v9 Patch 9/9):
diff --git a/drivers/pci/endpoint/pci-ep-msi.c b/drivers/pci/endpoint/pci-ep-msi.c
index f287fbf684ca..05423c83ae45 100644
--- a/drivers/pci/endpoint/pci-ep-msi.c
+++ b/drivers/pci/endpoint/pci-ep-msi.c
@@ -44,6 +44,9 @@ static int pci_epf_alloc_doorbell_embedded(struct pci_epf *epf, u16 num_db)
struct pci_epf_doorbell_msg *msg;
struct pci_epc *epc = epf->epc;
struct device *dev = &epf->dev;
+ phys_addr_t phys_base;
+ size_t map_size, off;
+ dma_addr_t iova_base;
int count, ret, i;
u64 addr;
@@ -85,6 +88,17 @@ static int pci_epf_alloc_doorbell_embedded(struct pci_epf *epf, u16 num_db)
if (!IS_ALIGNED(addr, sizeof(u32)))
return -EINVAL;
+ phys_base = addr & PAGE_MASK;
+ off = addr - phys_base;
+ map_size = PAGE_ALIGN(off + sizeof(u32));
+
+ iova_base = dma_map_resource(epc->dev.parent, phys_base, map_size,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(epc->dev.parent, iova_base))
+ return -EIO;
+
+ addr = iova_base + off;
+
msg = kcalloc(num_db, sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -111,6 +125,8 @@ static int pci_epf_alloc_doorbell_embedded(struct pci_epf *epf, u16 num_db)
.bar = doorbell->bar,
.offset = (doorbell->bar == NO_BAR) ? 0 :
doorbell->bar_offset,
+ .iova_base = iova_base,
+ .iova_size = map_size,
};
epf->num_db = num_db;
@@ -211,11 +227,18 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_doorbell);
void pci_epf_free_doorbell(struct pci_epf *epf)
{
+ struct pci_epf_doorbell_msg *msg0;
+ struct pci_epc *epc = epf->epc;
+
if (!epf->db_msg)
return;
- if (epf->db_msg[0].type == PCI_EPF_DOORBELL_MSI)
+ msg0 = &epf->db_msg[0];
+ if (msg0->type == PCI_EPF_DOORBELL_MSI)
platform_device_msi_free_irqs_all(epf->epc->dev.parent);
+ else if (msg0->type == PCI_EPF_DOORBELL_EMBEDDED)
+ dma_unmap_resource(epc->dev.parent, msg0->iova_base,
+ msg0->iova_size, DMA_FROM_DEVICE, 0);
kfree(epf->db_msg);
epf->db_msg = NULL;
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index cd747447a1ea..e39251a5a6f7 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -176,6 +176,8 @@ struct pci_epf_doorbell_msg {
struct msi_msg msg;
int virq;
unsigned long irq_flags;
+ dma_addr_t iova_base;
+ size_t iova_size;
enum pci_epf_doorbell_type type;
enum pci_barno bar;
resource_size_t offset;
----8<----
Note: pci_epc_aux_resource was intentionally designed to expose a common
'phys_addr' field (rather than a DMA address), because some use cases require a
raw physical address. For example, in the remote dw-edma scenario, the host side
programs the (EP-local) physical address directly into
dw_edma_chip->ll_region_*[i].paddr.
Frank, since this would affect Patch 9/9, I would appreciate it if you could
take another look and share your thoughts. I had to drop your Reviewed-by tag in
v9 due to a small change, so a re-review would be very helpful in any case.
Niklas, any comments would be appreciated.
Best regards,
Koichiro | {
"author": "Koichiro Den <den@valinux.co.jp>",
"date": "Sat, 21 Feb 2026 02:42:35 +0900",
"is_openbsd": false,
"thread_id": "aaG5asXVV5sxRbnQ@ryzen.mbox.gz"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.