diff --git a/Cargo.toml b/Cargo.toml index e97be2d462eaa4..371adf5927eb86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,6 +85,8 @@ bevy_gltf = ["bevy_internal/bevy_gltf", "bevy_asset", "bevy_scene", "bevy_pbr"] # Adds PBR rendering bevy_pbr = ["bevy_internal/bevy_pbr", "bevy_asset", "bevy_render", "bevy_core_pipeline"] +gpu_picking = [] + # Provides rendering functionality bevy_render = ["bevy_internal/bevy_render"] diff --git a/crates/bevy_core_pipeline/src/core_3d/mod.rs b/crates/bevy_core_pipeline/src/core_3d/mod.rs index a142e4ef5c90c1..b876f08cbf2a40 100644 --- a/crates/bevy_core_pipeline/src/core_3d/mod.rs +++ b/crates/bevy_core_pipeline/src/core_3d/mod.rs @@ -86,6 +86,7 @@ impl Plugin for Core3dPlugin { prepare_core_3d_depth_textures .in_set(RenderSet::Prepare) .after(bevy_render::view::prepare_windows), + // #[cfg(feature = "gpu_picking")] prepare_entity_textures .in_set(RenderSet::Prepare) .after(bevy_render::view::prepare_windows), diff --git a/crates/bevy_core_pipeline/src/entity_index_buffer_copy/mod.rs b/crates/bevy_core_pipeline/src/entity_index_buffer_copy/mod.rs index 47a2f0f3856aa3..ff985a93d26aa4 100644 --- a/crates/bevy_core_pipeline/src/entity_index_buffer_copy/mod.rs +++ b/crates/bevy_core_pipeline/src/entity_index_buffer_copy/mod.rs @@ -25,11 +25,9 @@ impl ViewNode for EntityIndexBufferCopyNode { return Ok(()); }; - // copy entity index texture buffers.copy_texture_to_buffer( render_context.command_encoder(), &entity_index_textures.main.texture, - &buffers.entity_buffer, ); Ok(()) diff --git a/crates/bevy_gizmos/src/lib.rs b/crates/bevy_gizmos/src/lib.rs index 3802be98a7ff17..6c98ebdc663b91 100644 --- a/crates/bevy_gizmos/src/lib.rs +++ b/crates/bevy_gizmos/src/lib.rs @@ -37,7 +37,6 @@ use bevy_reflect::{ use bevy_render::{ color::Color, mesh::Mesh, - picking, primitives::Aabb, render_phase::AddRenderCommand, render_resource::{PrimitiveTopology, Shader, SpecializedMeshPipelines}, @@ -45,6 +44,9 @@ use bevy_render::{ }; use 
bevy_transform::components::{GlobalTransform, Transform}; +// #[cfg(feature = "gpu_picking")] +use bevy_render::picking; + #[cfg(feature = "bevy_pbr")] use bevy_pbr::MeshUniform; #[cfg(feature = "bevy_sprite")] @@ -326,6 +328,7 @@ fn extract_gizmo_data( transform, previous_transform: transform, inverse_transpose_model, + // #[cfg(feature = "gpu_picking")] entity: picking::entity_as_uvec2(Entity::PLACEHOLDER), }, ), diff --git a/crates/bevy_pbr/Cargo.toml b/crates/bevy_pbr/Cargo.toml index 7230c590278161..f87749b5b9d588 100644 --- a/crates/bevy_pbr/Cargo.toml +++ b/crates/bevy_pbr/Cargo.toml @@ -10,6 +10,7 @@ keywords = ["bevy"] [features] webgl = [] +gpu_picking = [] [dependencies] # bevy diff --git a/crates/bevy_pbr/src/render/mesh.rs b/crates/bevy_pbr/src/render/mesh.rs index 3c5718eb842de1..fe46c5c67fe573 100644 --- a/crates/bevy_pbr/src/render/mesh.rs +++ b/crates/bevy_pbr/src/render/mesh.rs @@ -132,7 +132,7 @@ pub struct MeshUniform { pub transform: Mat4, pub previous_transform: Mat4, pub inverse_transpose_model: Mat4, - // TODO should probably be a separate uniform in case gpu picking is disabled + // #[cfg(feature = "gpu_picking")] pub entity: UVec2, pub flags: u32, } @@ -187,6 +187,7 @@ pub fn extract_meshes( flags: flags.bits(), transform, previous_transform, + // #[cfg(feature = "gpu_picking")] entity: picking::entity_as_uvec2(entity), inverse_transpose_model: transform.inverse().transpose(), }; diff --git a/crates/bevy_render/src/lib.rs b/crates/bevy_render/src/lib.rs index 5cc3be5f32cfbd..5f235460bf206a 100644 --- a/crates/bevy_render/src/lib.rs +++ b/crates/bevy_render/src/lib.rs @@ -45,7 +45,6 @@ pub mod prelude { use bevy_window::{PrimaryWindow, RawHandleWrapper}; use globals::GlobalsPlugin; pub use once_cell; -use picking::GpuPickingPlugin; use renderer::{RenderAdapter, RenderAdapterInfo, RenderDevice, RenderQueue}; use wgpu::Instance; @@ -336,8 +335,7 @@ impl Plugin for RenderPlugin { .add_plugin(CameraPlugin) .add_plugin(ViewPlugin) 
.add_plugin(MeshPlugin) - .add_plugin(GlobalsPlugin) - .add_plugin(GpuPickingPlugin); + .add_plugin(GlobalsPlugin); app.register_type::() .register_type::() diff --git a/crates/bevy_render/src/picking.rs b/crates/bevy_render/src/picking.rs index 3e8e5072b4da8b..edcdaa9eddd097 100644 --- a/crates/bevy_render/src/picking.rs +++ b/crates/bevy_render/src/picking.rs @@ -1,32 +1,51 @@ use crate::{ camera::ExtractedCamera, extract_component::ExtractComponentPlugin, - render_resource::{Buffer, BufferSlice, Texture}, + render_resource::{Buffer, Texture}, renderer::RenderDevice, texture::{CachedTexture, TextureFormatPixelInfo}, Render, RenderApp, RenderSet, }; -use async_channel::{Receiver, Sender, TrySendError}; +use async_channel::{Receiver, Sender}; use bevy_app::{Plugin, Update}; use bevy_ecs::prelude::*; use bevy_math::UVec2; use bevy_render_macros::ExtractComponent; use bevy_tasks::AsyncComputeTaskPool; -use bevy_utils::{default, HashMap}; +use bevy_utils::default; use wgpu::{ - BufferDescriptor, BufferUsages, Color, CommandEncoder, Extent3d, ImageDataLayout, Maintain, - MapMode, Operations, RenderPassColorAttachment, TextureFormat, + BufferDescriptor, BufferUsages, Color, CommandEncoder, Extent3d, ImageDataLayout, MapMode, + Operations, RenderPassColorAttachment, TextureFormat, }; pub const ENTITY_TEXTURE_FORMAT: TextureFormat = TextureFormat::Rg32Uint; +/// This plugin enables the gpu picking feature of bevy. +/// +/// Gpu picking lets you know which entity is currently under the mouse. +/// +/// # How this works: +/// +/// - For each entity being rendered, it will output its entity id to a texture. +/// - Once everything is rendered it will copy that texture to the cpu and send it to the main world +/// - From the main world you can give it a position like the current mouse position and +/// know exactly which entity was rendered at that specific screen location.
+/// - This works at the [`Camera`] level, so it will work with multiple windows or split screen +/// +/// # API Overview: +/// +/// You need to add the [`GpuPickingCamera`] to any `Camera` that will be used for picking. +/// Then add the [`GpuPickingMesh`] component to any `Mesh` that will need to be picked. +/// +/// Once those components are added, you can query for [`GpuPickingCamera`] +/// and use `GpuPickingCamera::get_entity(position)` to know which entity is at the given position on screen pub struct GpuPickingPlugin; impl Plugin for GpuPickingPlugin { fn build(&self, app: &mut bevy_app::App) { app.add_plugin(ExtractComponentPlugin::::default()) .add_plugin(ExtractComponentPlugin::::default()) - .add_systems(Update, update_entity_buffer); + .add_systems(Update, receive_buffer); let Ok(render_app) = app.get_sub_app_mut(RenderApp) else { return; }; @@ -34,99 +53,54 @@ impl Plugin for GpuPickingPlugin { Render, ( prepare_gpu_picking_buffers.in_set(RenderSet::Prepare), - send_buffer_to_main_world.in_set(RenderSet::RenderFlush), + map_and_send_buffer_async.in_set(RenderSet::RenderFlush), ), ); } } -/// Copies the entity buffer to the cpu and sends it to the main world -fn send_buffer_to_main_world( - query: Query<&ExtractedGpuPickingCamera, With>, - render_device: Res, -) { +/// Maps the entity buffer and sends it to the main world asynchronously +fn map_and_send_buffer_async(query: Query<&ExtractedGpuPickingCamera, With>) { for gpu_picking_camera in &query { let Some(buffers) = gpu_picking_camera.buffers.as_ref() else { return; }; + let buffers = buffers.clone(); + let sender = gpu_picking_camera.sender.clone(); - // Send the data to the main world - fn send( - sender: Sender, - padded_bytes_per_row: usize, - entity_buffer: &Buffer, - buffer_slice: BufferSlice, - ) { - let buffer_view = buffer_slice.get_mapped_range(); - // immediately copy the data to CPU to avoid holding the mapped view for long - let entity_data = Vec::from(&*buffer_view); -
drop(buffer_view); - entity_buffer.unmap(); + // Mapping the buffer is an asynchronous process. + // This means we need to wait until the buffer is mapped before sending it to the main world + let task = async move { + let (tx, rx) = async_channel::bounded(1); - if let Err(err) = sender.try_send(GpuPickingData { - padded_bytes_per_row, - entity_data, - }) { - match err { - TrySendError::Full(_) => bevy_log::error!("GPU Picking channel is full"), - TrySendError::Closed(_) => { - bevy_log::error!("GPU Picking channel is closed"); - } - } - } - - // This can only fail if the sender is full or closed - // and we can't do anything if either of those things happen - // let _ = sender.try_send(GpuPickingData { - // padded_bytes_per_row, - // entity_data, - // }); - } - - if true { - // sync + // map entity buffer let buffer_slice = buffers.entity_buffer.slice(..); buffer_slice.map_async(MapMode::Read, move |result| match result { - Ok(_) => {} + Ok(_) => tx.try_send(()).unwrap(), Err(err) => bevy_log::error!("Failed to map entity buffer: {err}"), }); - // WARN This is blocking - render_device.poll(Maintain::Wait); + // Buffer is mapped and ready to be sent + rx.recv().await.unwrap(); // Send the buffer to the main world - send( - gpu_picking_camera.sender.clone(), - buffers.buffer_dimensions.padded_bytes_per_row, - &buffers.entity_buffer, - buffer_slice, - ); - } else { - // async - let entity_buffer = buffers.entity_buffer.clone(); - let sender = gpu_picking_camera.sender.clone(); - let padded_bytes_per_row = buffers.buffer_dimensions.padded_bytes_per_row; - - // Mapping the buffer is an asynchronous process. 
- // This means we need to wait until the buffer is mapped before sending it to the main world - let task = async move { - let (tx, rx) = async_channel::bounded(1); - - // map entity buffer - let buffer_slice = entity_buffer.slice(..); - buffer_slice.map_async(MapMode::Read, move |result| match result { - Ok(_) => tx.try_send(()).unwrap(), - Err(err) => bevy_log::error!("Failed to map entity buffer: {err}"), - }); - - // Buffer is mapped and ready to be sent - rx.recv().await.unwrap(); - - // Send the buffer to the main world - send(sender, padded_bytes_per_row, &entity_buffer, buffer_slice); - }; - AsyncComputeTaskPool::get().spawn(task).detach(); - } + let entity_buffer = &buffers.entity_buffer; + let buffer_view = buffer_slice.get_mapped_range(); + // immediately copy the data to CPU to avoid holding the mapped view for long + let entity_data = Vec::from(&*buffer_view); + drop(buffer_view); + entity_buffer.unmap(); + + // Because the channel is bounded to 1 entry, it's pretty common to have the channel full. + // This isn't ideal but not blocking makes it faster which is preferred. 
+ // + // The other possible error is for the channel to be closed and in that case we can't do anything + let _ = sender.try_send(GpuPickingData { + padded_bytes_per_row: buffers.buffer_dimensions.padded_bytes_per_row, + entity_data, + }); + }; + AsyncComputeTaskPool::get().spawn(task).detach(); } } @@ -166,7 +140,7 @@ impl Default for GpuPickingCamera { impl GpuPickingCamera { pub fn new() -> Self { Self { - channel: async_channel::unbounded(), + channel: async_channel::bounded(1), data: GpuPickingData::default(), } } @@ -218,24 +192,21 @@ impl crate::extract_component::ExtractComponent for GpuPickingCamera { } /// Contains the buffer and it's dimension required for gpu picking +#[derive(Clone)] pub struct GpuPickingCameraBuffers { pub entity_buffer: Buffer, buffer_dimensions: BufferDimensions, } impl GpuPickingCameraBuffers { - pub fn copy_texture_to_buffer( - &self, - encoder: &mut CommandEncoder, - texture: &Texture, - buffer: &Buffer, - ) { + /// Copies the given texture to the entity buffer + pub fn copy_texture_to_buffer(&self, encoder: &mut CommandEncoder, texture: &Texture) { // This can't be in the Node because it needs access to wgpu but // bevy_core_pipeline doesn't depend on wgpu encoder.copy_texture_to_buffer( texture.as_image_copy(), wgpu::ImageCopyBuffer { - buffer, + buffer: &self.entity_buffer, layout: ImageDataLayout { offset: 0, bytes_per_row: Some(self.buffer_dimensions.padded_bytes_per_row as u32), @@ -251,16 +222,11 @@ impl GpuPickingCameraBuffers { } } -fn update_entity_buffer(mut q: Query<&mut GpuPickingCamera>) { +fn receive_buffer(mut q: Query<&mut GpuPickingCamera>) { for mut cam in &mut q { let (_, receiver) = cam.channel.clone(); - loop { - let Ok(buffer) = receiver.try_recv() else { break; }; - if receiver.is_empty() { - cam.data = buffer; - break; - } - } + let Ok(data) = receiver.try_recv() else { continue; }; + cam.data = data; } } @@ -316,39 +282,25 @@ impl EntityTextures { fn prepare_gpu_picking_buffers( render_device: Res, mut 
cameras: Query< - (Entity, &ExtractedCamera, &mut ExtractedGpuPickingCamera), + (&ExtractedCamera, &mut ExtractedGpuPickingCamera), Changed, >, - mut buffer_cache: Local>, ) { - for (entity, camera, mut gpu_picking_camera) in &mut cameras { + for (camera, mut gpu_picking_camera) in &mut cameras { let Some(size) = camera.physical_target_size else { continue; }; - // TODO create 2 buffers and altenate between each buffer each frame - - // Only create a buffer if it doesn't already exist or the size is different - let mut create_buffer = true; - if let Some((buffer_dimensions, _)) = buffer_cache.get(&entity) { - create_buffer = buffer_dimensions.width != size.x as usize - || buffer_dimensions.height != size.y as usize; - } - - if create_buffer { - let buffer_dimensions = - BufferDimensions::new(size.x as usize, size.y as usize, ENTITY_TEXTURE_FORMAT); - let entity_buffer = render_device.create_buffer(&BufferDescriptor { - label: Some("Entity Buffer"), - size: (buffer_dimensions.padded_bytes_per_row * buffer_dimensions.height) as u64, - usage: BufferUsages::COPY_DST | BufferUsages::MAP_READ, - mapped_at_creation: false, - }); - buffer_cache.insert(entity, (buffer_dimensions, entity_buffer)); - } + let buffer_dimensions = + BufferDimensions::new(size.x as usize, size.y as usize, ENTITY_TEXTURE_FORMAT); + let entity_buffer = render_device.create_buffer(&BufferDescriptor { + label: Some("Entity Buffer"), + size: (buffer_dimensions.padded_bytes_per_row * buffer_dimensions.height) as u64, + usage: BufferUsages::COPY_DST | BufferUsages::MAP_READ, + mapped_at_creation: false, + }); - let Some((buffer_dimensions, entity_buffer)) = buffer_cache.get(&entity) else { continue; }; gpu_picking_camera.buffers = Some(GpuPickingCameraBuffers { entity_buffer: entity_buffer.clone(), - buffer_dimensions: *buffer_dimensions, + buffer_dimensions, }); } } diff --git a/examples/input/gpu_picking.rs b/examples/input/gpu_picking.rs index 7b84c7a8a44cd8..4f7aa060d9fdb6 100644 --- 
a/examples/input/gpu_picking.rs +++ b/examples/input/gpu_picking.rs @@ -1,17 +1,20 @@ +//! This example shows how to use the gpu picking API. +//! +//! Gpu picking is a way to generate a texture of all the rendered entities and +//! use this texture to determine exactly which entity is under the mouse. + use bevy::prelude::*; use bevy_internal::{ reflect::{TypePath, TypeUuid}, render::{ - camera::RenderTarget, - picking::{GpuPickingCamera, GpuPickingMesh}, + picking::{GpuPickingCamera, GpuPickingMesh, GpuPickingPlugin}, render_resource::{AsBindGroup, ShaderRef}, }, - window::{PresentMode, PrimaryWindow, WindowRef}, + window::PresentMode, }; fn main() { App::new() - // .insert_resource(Msaa::Off) .add_plugins(DefaultPlugins.set(WindowPlugin { primary_window: Some(Window { present_mode: PresentMode::AutoNoVsync, @@ -20,6 +23,7 @@ fn main() { ..default() })) .add_plugin(MaterialPlugin::::default()) + .add_plugin(GpuPickingPlugin) .add_systems(Startup, setup) .add_systems(Update, (mouse_picking, save_buffer_to_file, move_cube)) .run(); @@ -122,32 +126,11 @@ fn setup( }, GpuPickingCamera::default(), )); - - // // Spawn a second window - // let second_window = commands - // .spawn(Window { - // title: "Second window".to_owned(), - // ..default() - // }) - // .id(); - - // // second window camera - // commands.spawn(( - // Camera3dBundle { - // transform: Transform::from_xyz(-2.0, 2.5, -5.0).looking_at(Vec3::ZERO, Vec3::Y), - // camera: Camera { - // target: RenderTarget::Window(WindowRef::Entity(second_window)), - // ..default() - // }, - // ..default() - // }, - // GpuPickingCamera::default(), - // )); } fn mouse_picking( mut cursor_moved: EventReader, - gpu_picking_cameras: Query<(&GpuPickingCamera, &Camera)>, + gpu_picking_cameras: Query<&GpuPickingCamera>, material_handle: Query<( Option<&Handle>, Option<&Handle>, @@ -155,8 +138,8 @@ fn mouse_picking( mut materials: ResMut>, mut custom_materials: ResMut>, mut hovered: Local>, - windows: Query, With>, ) { + // Sets the
color of the given entity let mut set_color = |entity, color: Color| { let (std_handle, custom_handle) = material_handle.get(entity).expect("Entity should exist"); if let Some(material) = std_handle.and_then(|h| materials.get_mut(h)) { @@ -171,23 +154,9 @@ fn mouse_picking( let Some(moved_event) = cursor_moved.iter().last() else { return; }; let mouse_position = moved_event.position.as_uvec2(); - let window_entity = moved_event.window; - - let primary_window = windows.get(window_entity).unwrap(); - - for (gpu_picking_camera, camera) in &gpu_picking_cameras { - if let RenderTarget::Window(window_ref) = camera.target { - let is_same_window = match window_ref { - WindowRef::Primary => primary_window.is_some(), - WindowRef::Entity(window_ref_entity) => window_entity == window_ref_entity, - }; - if !is_same_window { - continue; - } - } - // This will read the entity index texture and get the entity - // index that is at the given position + for gpu_picking_camera in &gpu_picking_cameras { + // This will read the entity texture and get the entity that is at the given position if let Some(entity) = gpu_picking_camera.get_entity(mouse_position) { if let Some(hovered) = *hovered { if entity != hovered {