From 0fc3760944dc9b85c7293a95acfb628a6f999db3 Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Mon, 20 Apr 2026 09:41:36 -0500 Subject: [PATCH] virtio/gpu: mmap dma-buf blob fds into the guest shm region resource_map_blob() in the virgl_resource_map2/Linux variant only mmaps SHM (memfd) blob fds into the guest's virtio shm region directly. DMABUF blob fds are delegated to Rutabaga::resource_map -> virgl_renderer_resource_map2, which is designed for virglrenderer-allocated GPU memory and has no machinery to mmap an external dma-buf fd into a host VA range. For dma-bufs that are not virglrenderer-managed (V4L2 capture buffers exported via VIDIOC_EXPBUF, dma-bufs forwarded to the guest by a host PipeWire daemon, etc.) it silently no-ops and the guest's blob is left backed by zero pages. The user-visible symptom is camera capture across the muvm PipeWire bridge delivering all-zero frames even though every layer reports success: SCM_RIGHTS forwards the dma-buf fd, libkrun classifies it correctly as DMABUF, the guest issues DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB which succeeds, and multifilesink writes plausible-sized files that turn out to be 0x00 bytes from offset 0 to EOF. A V4L2 EXPBUF dma-buf wraps regular kernel pages allocated via VIDIOC_REQBUFS/V4L2_MEMORY_MMAP and is CPU-mappable. It can be mmap'd into the guest's pre-reserved shm region with MAP_FIXED exactly the same way SHM (memfd) blobs are. Extending the existing SHM branch to also accept DMABUF gives the guest live pages and fixes camera capture without changes to the bridge, PipeWire, or the v4l2 userspace stack. If the dma-buf is GPU-only and not CPU-mappable, mmap returns MAP_FAILED and we surface ErrUnspec, which is no worse than the prior silent no-op. The handle_type is now included in the error log to help diagnose mismatches. 
The fd lifetime is fine: Rutabaga::export_blob returns a try_clone() of the descriptor, and the mapping persists past the temporary's drop because the original fd is retained in the resource's persistent handle slot. Signed-off-by: Adam Ford --- src/devices/src/virtio/gpu/virtio_gpu.rs | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/src/devices/src/virtio/gpu/virtio_gpu.rs b/src/devices/src/virtio/gpu/virtio_gpu.rs index ebca60486..43b4ef7ec 100644 --- a/src/devices/src/virtio/gpu/virtio_gpu.rs +++ b/src/devices/src/virtio/gpu/virtio_gpu.rs @@ -20,6 +20,8 @@ use krun_display::{ use libc::c_void; #[cfg(target_os = "macos")] use rutabaga_gfx::RUTABAGA_MEM_HANDLE_TYPE_APPLE; +#[cfg(all(feature = "virgl_resource_map2", target_os = "linux"))] +use rutabaga_gfx::RUTABAGA_MEM_HANDLE_TYPE_DMABUF; #[cfg(all(not(feature = "virgl_resource_map2"), target_os = "linux"))] use rutabaga_gfx::RUTABAGA_MEM_HANDLE_TYPE_OPAQUE_FD; #[cfg(all(feature = "virgl_resource_map2", target_os = "linux"))] @@ -837,7 +839,20 @@ impl VirtioGpu { let addr = shm_region.host_addr + offset; if let Ok(export) = self.rutabaga.export_blob(resource_id) { - if export.handle_type == RUTABAGA_MEM_HANDLE_TYPE_SHM { + // SHM and DMABUF are both regular host fds whose pages can be exposed + // to the guest by mmap'ing them directly into the virtio shm region. + // For SHM (memfd) this has always worked. For DMABUF it had been + // delegated to virgl_renderer_resource_map2, which only handles + // virglrenderer-allocated GPU memory and silently no-ops for external + // dma-bufs — leaving the guest blob backed by zero pages. That broke + // muvm camera capture, where the v4l2 source exports kernel buffers + // via VIDIOC_EXPBUF as dma-bufs, the muvm bridge forwards the fd + // across SCM_RIGHTS, libkrun classifies it as DMABUF, and the guest's + // CREATE_BLOB allocates a host-backed-by-nothing blob. 
Mapping the + // dma-buf fd directly here gives the guest real, live pages. + if export.handle_type == RUTABAGA_MEM_HANDLE_TYPE_SHM + || export.handle_type == RUTABAGA_MEM_HANDLE_TYPE_DMABUF + { let ret = unsafe { libc::mmap( addr as *mut libc::c_void, @@ -849,7 +864,10 @@ impl VirtioGpu { ) }; if ret == libc::MAP_FAILED { - error!("failed to mmap resource in shm region"); + error!( + "failed to mmap resource in shm region (handle_type={:#x})", + export.handle_type + ); return Err(ErrUnspec); } } else {