tessera_ui/renderer/compute/pipeline.rs

//! GPU compute pipeline system for the Tessera UI framework.
//!
//! This module provides the infrastructure for GPU compute operations in Tessera,
//! enabling advanced visual effects and post-processing operations that would be
//! inefficient or impossible to achieve with traditional CPU-based approaches.
//!
//! # Architecture Overview
//!
//! The compute pipeline system is designed to work seamlessly with the rendering
//! pipeline, using a ping-pong buffer approach for efficient multi-pass operations.
//! Each compute pipeline processes a specific type of compute command and operates
//! on texture data using GPU compute shaders.
//!
//! ## Key Components
//!
//! - [`ComputablePipeline<C>`]: The main trait for implementing custom compute pipelines
//! - [`ComputePipelineRegistry`]: Manages and dispatches commands to registered compute pipelines
//! - [`ComputeResourceManager`]: Manages GPU buffers and resources for compute operations
//!
//! # Design Philosophy
//!
//! The compute pipeline system embraces WGPU's compute shader capabilities to enable:
//!
//! - **Advanced Post-Processing**: Blur, contrast adjustment, color grading, and other image effects
//! - **Parallel Processing**: Leverage GPU parallelism for computationally intensive operations
//! - **Real-Time Effects**: Achieve complex visual effects at interactive frame rates
//! - **Memory Efficiency**: Use GPU memory directly without CPU roundtrips
//!
//! # Ping-Pong Rendering
//!
//! The system uses a ping-pong approach where:
//!
//! 1. **Input Texture**: Contains the result of the previous render or compute pass
//! 2. **Output Texture**: Receives the processed result from the current compute operation
//! 3. **Format Convention**: All textures use `wgpu::TextureFormat::Rgba8Unorm` for compatibility
//!
//! This approach enables efficient chaining of multiple compute operations without
//! intermediate CPU involvement.
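//!
//! As an illustration only (this is not the renderer's actual internals), the chaining
//! can be pictured as two texture views whose roles are swapped after every pass. The
//! surrounding variables (`device`, `queue`, `config`, `compute_pass`, `registry`,
//! `resource_manager`, the command list, and the two views) are assumed to exist:
//!
//! ```rust,ignore
//! let mut input_view = &texture_a_view;
//! let mut output_view = &texture_b_view;
//! for command in &compute_commands {
//!     registry.dispatch_erased(
//!         &device,
//!         &queue,
//!         &config,
//!         &mut compute_pass,
//!         command.as_ref(),
//!         &mut resource_manager,
//!         input_view,
//!         output_view,
//!     );
//!     // The texture just written becomes the input of the next pass.
//!     std::mem::swap(&mut input_view, &mut output_view);
//! }
//! ```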
//!
//! # Implementation Guide
//!
//! ## Creating a Custom Compute Pipeline
//!
//! To create a custom compute pipeline:
//!
//! 1. Define your compute command struct implementing [`ComputeCommand`]
//! 2. Create a pipeline struct implementing [`ComputablePipeline<YourCommand>`]
//! 3. Write a compute shader in WGSL
//! 4. Register the pipeline with [`ComputePipelineRegistry::register`]
//!
//! ## Example: Simple Brightness Adjustment Pipeline
//!
//! ```rust,ignore
//! use tessera_ui::{ComputeCommand, ComputablePipeline, compute::resource::ComputeResourceManager};
//! use tessera_ui::renderer::compute::ComputePipelineRegistry;
//! use wgpu::util::DeviceExt; // brings `create_buffer_init` into scope
//!
//! // 1. Define the compute command
//! #[derive(Debug)]
//! struct BrightnessCommand {
//!     brightness: f32,
//! }
//!
//! impl ComputeCommand for BrightnessCommand {}
//!
//! // 2. Implement the pipeline
//! struct BrightnessPipeline {
//!     compute_pipeline: wgpu::ComputePipeline,
//!     bind_group_layout: wgpu::BindGroupLayout,
//! }
//!
//! impl BrightnessPipeline {
//!     fn new(device: &wgpu::Device) -> Self {
//!         // Create compute shader and pipeline
//!         let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
//!             label: Some("Brightness Shader"),
//!             source: wgpu::ShaderSource::Wgsl(include_str!("brightness.wgsl").into()),
//!         });
//!
//!         // ... setup bind group layout and pipeline ...
//!         # unimplemented!()
//!     }
//! }
//!
//! impl ComputablePipeline<BrightnessCommand> for BrightnessPipeline {
//!     fn dispatch(
//!         &mut self,
//!         device: &wgpu::Device,
//!         queue: &wgpu::Queue,
//!         config: &wgpu::SurfaceConfiguration,
//!         compute_pass: &mut wgpu::ComputePass<'_>,
//!         command: &BrightnessCommand,
//!         resource_manager: &mut ComputeResourceManager,
//!         input_view: &wgpu::TextureView,
//!         output_view: &wgpu::TextureView,
//!     ) {
//!         // Create uniforms buffer with brightness value
//!         let uniforms = [command.brightness];
//!         let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
//!             label: Some("Brightness Uniforms"),
//!             contents: bytemuck::cast_slice(&uniforms),
//!             usage: wgpu::BufferUsages::UNIFORM,
//!         });
//!
//!         // Create bind group with input/output textures and uniforms
//!         let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
//!             layout: &self.bind_group_layout,
//!             entries: &[
//!                 wgpu::BindGroupEntry { binding: 0, resource: uniform_buffer.as_entire_binding() },
//!                 wgpu::BindGroupEntry { binding: 1, resource: wgpu::BindingResource::TextureView(input_view) },
//!                 wgpu::BindGroupEntry { binding: 2, resource: wgpu::BindingResource::TextureView(output_view) },
//!             ],
//!             label: Some("brightness_bind_group"),
//!         });
//!
//!         // Dispatch compute shader
//!         compute_pass.set_pipeline(&self.compute_pipeline);
//!         compute_pass.set_bind_group(0, &bind_group, &[]);
//!         compute_pass.dispatch_workgroups(
//!             (config.width + 7) / 8,
//!             (config.height + 7) / 8,
//!             1
//!         );
//!     }
//! }
//!
//! // 3. Register the pipeline
//! let mut registry = ComputePipelineRegistry::new();
//! let brightness_pipeline = BrightnessPipeline::new(&device);
//! registry.register(brightness_pipeline);
//! ```
//!
//! ## Example WGSL Compute Shader
//!
//! ```wgsl
//! @group(0) @binding(0) var<uniform> brightness: f32;
//! @group(0) @binding(1) var input_texture: texture_2d<f32>;
//! @group(0) @binding(2) var output_texture: texture_storage_2d<rgba8unorm, write>;
//!
//! @compute @workgroup_size(8, 8)
//! fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
//!     let coords = vec2<i32>(global_id.xy);
//!     let input_color = textureLoad(input_texture, coords, 0);
//!     let output_color = vec4<f32>(input_color.rgb * brightness, input_color.a);
//!     textureStore(output_texture, coords, output_color);
//! }
//! ```
//!
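//! The bind group layout elided in `BrightnessPipeline::new` above could be filled in
//! along these lines. This is a sketch matching the three bindings declared by the
//! shader, not code shipped by the framework, and field names may vary slightly between
//! wgpu versions:
//!
//! ```rust,ignore
//! let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
//!     label: Some("brightness_bind_group_layout"),
//!     entries: &[
//!         // @binding(0): the brightness uniform
//!         wgpu::BindGroupLayoutEntry {
//!             binding: 0,
//!             visibility: wgpu::ShaderStages::COMPUTE,
//!             ty: wgpu::BindingType::Buffer {
//!                 ty: wgpu::BufferBindingType::Uniform,
//!                 has_dynamic_offset: false,
//!                 min_binding_size: None,
//!             },
//!             count: None,
//!         },
//!         // @binding(1): the input texture read with `textureLoad`
//!         wgpu::BindGroupLayoutEntry {
//!             binding: 1,
//!             visibility: wgpu::ShaderStages::COMPUTE,
//!             ty: wgpu::BindingType::Texture {
//!                 sample_type: wgpu::TextureSampleType::Float { filterable: false },
//!                 view_dimension: wgpu::TextureViewDimension::D2,
//!                 multisampled: false,
//!             },
//!             count: None,
//!         },
//!         // @binding(2): the write-only storage texture
//!         wgpu::BindGroupLayoutEntry {
//!             binding: 2,
//!             visibility: wgpu::ShaderStages::COMPUTE,
//!             ty: wgpu::BindingType::StorageTexture {
//!                 access: wgpu::StorageTextureAccess::WriteOnly,
//!                 format: wgpu::TextureFormat::Rgba8Unorm,
//!                 view_dimension: wgpu::TextureViewDimension::D2,
//!             },
//!             count: None,
//!         },
//!     ],
//! });
//! ```
//!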
//! # Integration with Basic Components
//!
//! The `tessera_basic_components` crate provides several compute pipeline implementations:
//!
//! - **BlurPipeline**: Gaussian blur effects for backgrounds and UI elements
//! - **MeanPipeline**: Average color calculation for adaptive UI themes
//! - **ContrastPipeline**: Contrast and saturation adjustments
//!
//! These pipelines demonstrate real-world usage patterns and can serve as references
//! for implementing custom compute operations.
//!
//! # Performance Considerations
//!
//! - **Workgroup Size**: Choose workgroup sizes that align with GPU architecture (typically 8x8 or 16x16)
//! - **Memory Access**: Optimize memory access patterns in shaders for better cache utilization
//! - **Resource Reuse**: Use the [`ComputeResourceManager`] to reuse buffers across frames
//! - **Batch Operations**: Combine multiple similar operations when possible
//!
//! # Texture Format Requirements
//!
//! Due to WGPU limitations, compute shaders require specific texture formats:
//!
//! - **Input Textures**: Can be any readable format, typically from render passes
//! - **Output Textures**: Must use `wgpu::TextureFormat::Rgba8Unorm` for storage binding
//! - **sRGB Limitation**: sRGB formats cannot be used as storage textures
//!
//! The framework automatically handles format conversions when necessary.
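//!
//! For illustration, an output texture compatible with storage binding might be created
//! like this. This is a sketch of plain wgpu usage (assuming `device` and `config` from
//! the surrounding code), not the framework's internal texture setup, and exact fields
//! vary slightly between wgpu versions:
//!
//! ```rust,ignore
//! let output_texture = device.create_texture(&wgpu::TextureDescriptor {
//!     label: Some("compute_output"),
//!     size: wgpu::Extent3d {
//!         width: config.width,
//!         height: config.height,
//!         depth_or_array_layers: 1,
//!     },
//!     mip_level_count: 1,
//!     sample_count: 1,
//!     dimension: wgpu::TextureDimension::D2,
//!     // Rgba8Unorm is storage-compatible; the sRGB variant is not.
//!     format: wgpu::TextureFormat::Rgba8Unorm,
//!     usage: wgpu::TextureUsages::STORAGE_BINDING
//!         | wgpu::TextureUsages::TEXTURE_BINDING,
//!     view_formats: &[],
//! });
//! let output_view = output_texture.create_view(&wgpu::TextureViewDescriptor::default());
//! ```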

use crate::compute::resource::ComputeResourceManager;

use super::command::ComputeCommand;

/// Core trait for implementing GPU compute pipelines.
///
/// This trait defines the interface for compute pipelines that process specific types
/// of compute commands using GPU compute shaders. Each pipeline is responsible for
/// setting up compute resources, managing shader dispatch, and processing texture data.
///
/// # Type Parameters
///
/// * `C` - The specific [`ComputeCommand`] type this pipeline can handle
///
/// # Design Principles
///
/// - **Single Responsibility**: Each pipeline handles one specific type of compute operation
/// - **Stateless Operation**: Pipelines should not maintain state between dispatch calls
/// - **Resource Efficiency**: Reuse GPU resources when possible through the resource manager
/// - **Thread Safety**: All implementations must be `Send + Sync` for parallel execution
///
/// # Integration with Rendering
///
/// Compute pipelines operate within the broader rendering pipeline, typically:
///
/// 1. **After Rendering**: Process the rendered scene for post-effects
/// 2. **Between Passes**: Transform data between different rendering stages
/// 3. **Before Rendering**: Prepare data or textures for subsequent render operations
///
/// # Example Implementation Pattern
///
/// ```rust,ignore
/// impl ComputablePipeline<MyCommand> for MyPipeline {
///     fn dispatch(
///         &mut self,
///         device: &wgpu::Device,
///         queue: &wgpu::Queue,
///         config: &wgpu::SurfaceConfiguration,
///         compute_pass: &mut wgpu::ComputePass<'_>,
///         command: &MyCommand,
///         resource_manager: &mut ComputeResourceManager,
///         input_view: &wgpu::TextureView,
///         output_view: &wgpu::TextureView,
///     ) {
///         // 1. Create or retrieve uniform buffer
///         let uniforms = create_uniforms_from_command(command);
///         let uniform_buffer = device.create_buffer_init(...);
///
///         // 2. Create bind group with textures and uniforms
///         let bind_group = device.create_bind_group(...);
///
///         // 3. Set pipeline and dispatch
///         compute_pass.set_pipeline(&self.compute_pipeline);
///         compute_pass.set_bind_group(0, &bind_group, &[]);
///         compute_pass.dispatch_workgroups(workgroup_x, workgroup_y, 1);
///     }
/// }
/// ```
pub trait ComputablePipeline<C: ComputeCommand>: Send + Sync + 'static {
    /// Dispatches the compute command within an active compute pass.
    ///
    /// This method is called once for each compute command that needs to be processed.
    /// It should set up the necessary GPU resources, bind them to the compute pipeline,
    /// and dispatch the appropriate number of workgroups to process the input texture.
    ///
    /// # Parameters
    ///
    /// * `device` - The WGPU device for creating GPU resources
    /// * `queue` - The WGPU queue for submitting commands and updating buffers
    /// * `config` - Current surface configuration containing dimensions and format info
    /// * `compute_pass` - The active compute pass to record commands into
    /// * `command` - The specific compute command containing operation parameters
    /// * `resource_manager` - Manager for reusing GPU buffers across operations
    /// * `input_view` - View of the input texture (result from previous pass)
    /// * `output_view` - View of the output texture (target for this operation)
    ///
    /// # Texture Format Requirements
    ///
    /// Due to WGPU limitations, storage textures have specific format requirements:
    ///
    /// - **Input Texture**: Can be any readable format, typically from render passes
    /// - **Output Texture**: Must use `wgpu::TextureFormat::Rgba8Unorm` format
    /// - **sRGB Limitation**: sRGB formats cannot be used as storage textures
    ///
    /// The framework ensures that `output_view` always uses a compatible format
    /// for storage binding operations.
    ///
    /// # Workgroup Dispatch Guidelines
    ///
    /// When dispatching workgroups, consider:
    ///
    /// - **Workgroup Size**: Match your shader's `@workgroup_size` declaration
    /// - **Coverage**: Ensure all pixels are processed by calculating appropriate dispatch dimensions
    /// - **Alignment**: Round up dispatch dimensions to cover the entire texture
    ///
    /// Common dispatch pattern:
    /// ```rust,ignore
    /// let workgroup_size = 8; // Match shader @workgroup_size(8, 8)
    /// let dispatch_x = (config.width + workgroup_size - 1) / workgroup_size;
    /// let dispatch_y = (config.height + workgroup_size - 1) / workgroup_size;
    /// compute_pass.dispatch_workgroups(dispatch_x, dispatch_y, 1);
    /// ```
    ///
    /// # Resource Management
    ///
    /// Use the `resource_manager` to:
    /// - Store persistent buffers that can be reused across frames
    /// - Avoid recreating expensive GPU resources
    /// - Manage buffer lifetimes efficiently
    ///
    /// # Error Handling
    ///
    /// This method should handle errors gracefully:
    /// - Validate command parameters before use
    /// - Ensure texture dimensions are compatible
    /// - Handle resource creation failures appropriately
    fn dispatch(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        config: &wgpu::SurfaceConfiguration,
        compute_pass: &mut wgpu::ComputePass<'_>,
        command: &C,
        resource_manager: &mut ComputeResourceManager,
        input_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
    );
}

/// Internal trait for type erasure of computable pipelines.
///
/// This trait enables dynamic dispatch of compute commands to their corresponding pipelines
/// without knowing the specific command type at compile time. It's used internally by
/// the [`ComputePipelineRegistry`] and should not be implemented directly by users.
///
/// The type erasure is achieved through the [`AsAny`] trait, which allows downcasting
/// from `&dyn ComputeCommand` to concrete command types.
///
/// # Implementation Note
///
/// This trait is automatically implemented for any type that implements
/// [`ComputablePipeline<C>`] through the [`ComputablePipelineImpl`] wrapper.
pub(crate) trait ErasedComputablePipeline: Send + Sync {
    /// Dispatches a type-erased compute command.
    fn dispatch_erased(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        config: &wgpu::SurfaceConfiguration,
        compute_pass: &mut wgpu::ComputePass<'_>,
        command: &dyn ComputeCommand,
        resource_manager: &mut ComputeResourceManager,
        input_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
    );
}

/// A wrapper to implement `ErasedComputablePipeline` for any `ComputablePipeline`.
struct ComputablePipelineImpl<C: ComputeCommand, P: ComputablePipeline<C>> {
    pipeline: P,
    _command: std::marker::PhantomData<C>,
}

impl<C: ComputeCommand + 'static, P: ComputablePipeline<C>> ErasedComputablePipeline
    for ComputablePipelineImpl<C, P>
{
    fn dispatch_erased(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        config: &wgpu::SurfaceConfiguration,
        compute_pass: &mut wgpu::ComputePass<'_>,
        command: &dyn ComputeCommand,
        resource_manager: &mut ComputeResourceManager,
        input_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
    ) {
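        // Only the pipeline registered for this concrete command type gets a
        // successful downcast; every other pipeline receives the command and
        // silently ignores it.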
        if let Some(command) = command.as_any().downcast_ref::<C>() {
            self.pipeline.dispatch(
                device,
                queue,
                config,
                compute_pass,
                command,
                resource_manager,
                input_view,
                output_view,
            );
        }
    }
}

/// Registry for managing and dispatching compute pipelines.
///
/// The `ComputePipelineRegistry` serves as the central hub for all compute pipelines
/// in the Tessera framework. It maintains a collection of registered pipelines and
/// handles the dispatch of compute commands to their appropriate pipelines.
///
/// # Architecture
///
/// The registry uses type erasure to store pipelines of different types in a single
/// collection. When a compute command needs to be processed, the registry offers it to
/// every registered pipeline; only the pipeline whose command type matches (via
/// downcasting) actually processes it.
///
/// # Usage Pattern
///
/// 1. Create a new registry
/// 2. Register all required compute pipelines during application initialization
/// 3. The renderer uses the registry to dispatch commands during frame rendering
///
/// # Example
///
/// ```rust,ignore
/// use tessera_ui::renderer::compute::ComputePipelineRegistry;
///
/// // Create registry and register pipelines
/// let mut registry = ComputePipelineRegistry::new();
/// registry.register(blur_pipeline);
/// registry.register(contrast_pipeline);
/// registry.register(brightness_pipeline);
///
/// // Registry is now ready for use by the renderer
/// ```
///
/// # Performance Considerations
///
/// - Every registered pipeline is offered each command; pipelines whose command type
///   does not match skip it after a failed downcast
/// - Per-command overhead therefore grows with the number of registered pipelines
/// - Register only the pipelines your application actually uses
///
/// # Thread Safety
///
/// The registry and all registered pipelines must be `Send + Sync` to support
/// parallel execution in the rendering system.
#[derive(Default)]
pub struct ComputePipelineRegistry {
    pipelines: Vec<Box<dyn ErasedComputablePipeline>>,
}

impl ComputePipelineRegistry {
    /// Creates a new empty compute pipeline registry.
    ///
    /// # Example
    ///
    /// ```
    /// use tessera_ui::renderer::compute::ComputePipelineRegistry;
    ///
    /// let registry = ComputePipelineRegistry::new();
    /// ```
    pub fn new() -> Self {
        Self::default()
    }

    /// Registers a new compute pipeline for a specific command type.
    ///
    /// This method takes ownership of the pipeline and wraps it in a type-erased
    /// container that can be stored alongside other pipelines of different types.
    ///
    /// # Type Parameters
    ///
    /// * `C` - The [`ComputeCommand`] type this pipeline handles
    ///
    /// # Parameters
    ///
    /// * `pipeline` - The pipeline instance to register
    ///
    /// # Example
    ///
    /// ```rust,ignore
    /// use tessera_ui::renderer::compute::ComputePipelineRegistry;
    ///
    /// let mut registry = ComputePipelineRegistry::new();
    ///
    /// // Register custom compute pipelines
    /// let blur_pipeline = BlurPipeline::new(&device);
    /// registry.register(blur_pipeline);
    ///
    /// let contrast_pipeline = ContrastPipeline::new(&device);
    /// registry.register(contrast_pipeline);
    ///
    /// // Register multiple pipelines for different effects
    /// registry.register(BrightnessAdjustmentPipeline::new(&device));
    /// registry.register(ColorGradingPipeline::new(&device));
    /// ```
    ///
    /// # Registration Order
    ///
    /// Commands are offered to every registered pipeline; only the pipeline whose
    /// command type matches performs any work, so registration order does not affect
    /// which pipeline handles a command. Each additional pipeline does add a small
    /// per-command downcast check, so register only the pipelines you actually use.
    ///
    /// # Thread Safety
    ///
    /// The pipeline must implement `Send + Sync` to be compatible with Tessera's
    /// parallel rendering architecture.
    pub fn register<C: ComputeCommand + 'static>(
        &mut self,
        pipeline: impl ComputablePipeline<C> + 'static,
    ) {
        let erased_pipeline = Box::new(ComputablePipelineImpl {
            pipeline,
            _command: std::marker::PhantomData,
        });
        self.pipelines.push(erased_pipeline);
    }

    /// Dispatches a type-erased command by offering it to every registered pipeline.
    ///
    /// Only the pipeline registered for the command's concrete type performs any work;
    /// all other pipelines fail the downcast and ignore the command.
    pub(crate) fn dispatch_erased(
        &mut self,
        device: &wgpu::Device,
        queue: &wgpu::Queue,
        config: &wgpu::SurfaceConfiguration,
        compute_pass: &mut wgpu::ComputePass<'_>,
        command: &dyn ComputeCommand,
        resource_manager: &mut ComputeResourceManager,
        input_view: &wgpu::TextureView,
        output_view: &wgpu::TextureView,
    ) {
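        // Offer the command to every registered pipeline. There is no early exit:
        // pipelines whose command type does not match simply no-op via the failed
        // downcast inside their `dispatch_erased`.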
        for pipeline in self.pipelines.iter_mut() {
            pipeline.dispatch_erased(
                device,
                queue,
                config,
                compute_pass,
                command,
                resource_manager,
                input_view,
                output_view,
            );
        }
    }
}