frame_builder.rs (56122B)
1 /* This Source Code Form is subject to the terms of the Mozilla Public 2 * License, v. 2.0. If a copy of the MPL was not distributed with this 3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 4 5 use api::{ColorF, DebugFlags, ExternalScrollId, FontRenderMode, ImageKey, MinimapData, PremultipliedColorF}; 6 use api::units::*; 7 use plane_split::BspSplitter; 8 use crate::batch::{BatchBuilder, AlphaBatchBuilder, AlphaBatchContainer}; 9 use crate::clip::{ClipStore, ClipTree}; 10 use crate::command_buffer::{PrimitiveCommand, CommandBufferList, CommandBufferIndex}; 11 use crate::{debug_colors, ChunkPool}; 12 use crate::spatial_node::SpatialNodeType; 13 use crate::spatial_tree::{SpatialTree, SpatialNodeIndex}; 14 use crate::composite::{CompositorKind, CompositeState, CompositeStatePreallocator}; 15 use crate::debug_item::DebugItem; 16 use crate::gpu_types::{ImageBrushPrimitiveData, PrimitiveHeaders, TransformPalette, ZBufferIdGenerator}; 17 use crate::gpu_types::{QuadSegment, TransformData}; 18 use crate::internal_types::{FastHashMap, PlaneSplitter, FrameStamp}; 19 use crate::invalidation::DirtyRegion; 20 use crate::tile_cache::{SliceId, TileCacheInstance}; 21 use crate::picture::{SurfaceInfo, SurfaceIndex, ResolvedSurfaceTexture}; 22 use crate::picture::{SubpixelMode, RasterConfig, PictureCompositeMode}; 23 use crate::prepare::prepare_picture; 24 use crate::prim_store::{PictureIndex, PrimitiveScratchBuffer}; 25 use crate::prim_store::{DeferredResolve, PrimitiveInstance}; 26 use crate::profiler::{self, TransactionProfile}; 27 use crate::render_backend::{DataStores, ScratchBuffer}; 28 use crate::renderer::{GpuBufferAddress, GpuBufferBuilder, GpuBufferBuilderF, GpuBufferBuilderI, GpuBufferF, GpuBufferI, GpuBufferDataF}; 29 use crate::render_target::{PictureCacheTarget, PictureCacheTargetKind}; 30 use crate::render_target::{RenderTargetContext, RenderTargetKind, RenderTarget}; 31 use crate::render_task_graph::{Pass, RenderTaskGraph, RenderTaskId, 
    SubPassSurface};
use crate::render_task_graph::{RenderPass, RenderTaskGraphBuilder};
use crate::render_task::{RenderTaskKind, StaticRenderTaskSurface};
use crate::resource_cache::ResourceCache;
use crate::scene::{BuiltScene, SceneProperties};
use crate::space::SpaceMapper;
use crate::segment::SegmentBuilder;
use crate::surface::SurfaceBuilder;
use std::sync::Arc;
use std::{f32, mem};
use crate::util::{MaxRect, VecHelper, Preallocator};
use crate::visibility::{update_prim_visibility, FrameVisibilityState, FrameVisibilityContext};
use crate::internal_types::{FrameVec, FrameMemory};

/// Static configuration for building frames, captured when the scene is
/// built and consulted throughout frame building (see `fb_config` in
/// `FrameBuildingContext`). Mostly GPU capability flags and tuning knobs.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct FrameBuilderConfig {
    pub default_font_render_mode: FontRenderMode,
    /// Whether the GPU/driver supports dual-source blending (read in
    /// `build()` to decide batch blending strategy).
    pub dual_source_blending_is_supported: bool,
    /// True if we're running tests (i.e. via wrench).
    pub testing: bool,
    pub gpu_supports_fast_clears: bool,
    pub gpu_supports_advanced_blend: bool,
    pub advanced_blend_is_coherent: bool,
    pub gpu_supports_render_target_partial_update: bool,
    /// Whether ImageBufferKind::TextureExternal images must first be copied
    /// to a regular texture before rendering.
    pub external_images_require_copy: bool,
    pub batch_lookback_count: usize,
    pub background_color: Option<ColorF>,
    pub compositor_kind: CompositorKind,
    pub tile_size_override: Option<DeviceIntSize>,
    pub max_surface_override: Option<usize>,
    /// Depth-id budget; used to size the `ZBufferIdGenerator` in `build()`.
    pub max_depth_ids: i32,
    pub max_target_size: i32,
    pub force_invalidation: bool,
    pub is_software: bool,
    pub low_quality_pinch_zoom: bool,
    pub max_shared_surface_size: i32,
    pub enable_dithering: bool,
    pub precise_linear_gradients: bool,
    pub precise_radial_gradients: bool,
    pub precise_conic_gradients: bool,
}

/// A set of default / global resources that are re-built each frame.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct FrameGlobalResources {
    /// The image shader block for the most common / default
    /// set of image parameters (color white, stretch == rect.size).
    pub default_image_data: GpuBufferAddress,

    /// A GPU cache config for drawing cut-out rectangle primitives.
    /// This is used to 'cut out' overlay tiles where a compositor
    /// surface exists.
    pub default_black_rect_address: GpuBufferAddress,
}

impl FrameGlobalResources {
    /// Write the default image / black-rect blocks into the per-frame
    /// f32 GPU buffer and record their addresses for later use.
    pub fn new(gpu_buffers: &mut GpuBufferBuilder) -> Self {
        let mut writer = gpu_buffers.f32.write_blocks(ImageBrushPrimitiveData::NUM_BLOCKS);
        writer.push(&ImageBrushPrimitiveData {
            color: PremultipliedColorF::WHITE,
            background_color: PremultipliedColorF::WHITE,
            // -ve means use prim rect for stretch size
            // NOTE(review): only the x component is negative here — presumably
            // the shader keys off x alone; confirm against the shader source.
            stretch_size: LayoutSize::new(-1.0, 0.0),
        });
        let default_image_data = writer.finish();

        // A single solid-black block, used when cutting holes for
        // compositor surfaces.
        let mut writer = gpu_buffers.f32.write_blocks(1);
        writer.push_one(PremultipliedColorF::BLACK);
        let default_black_rect_address = writer.finish();

        FrameGlobalResources {
            default_image_data,
            default_black_rect_address,
        }
    }
}

/// Scratch stacks used during frame building. The vectors are cleared
/// (not dropped) in `begin_frame`, so their capacity is retained across
/// frames to avoid per-frame reallocation.
pub struct FrameScratchBuffer {
    // Stack of dirty regions pushed/popped during primitive traversal.
    dirty_region_stack: Vec<DirtyRegion>,
    // Stack of (picture, surface) pairs used by the visibility pass.
    surface_stack: Vec<(PictureIndex, SurfaceIndex)>,
}

impl Default for FrameScratchBuffer {
    fn default() -> Self {
        FrameScratchBuffer {
            dirty_region_stack: Vec::new(),
            surface_stack: Vec::new(),
        }
    }
}

impl FrameScratchBuffer {
    /// Reset the scratch stacks at the start of a frame, keeping their
    /// allocations for reuse.
    pub fn begin_frame(&mut self) {
        self.dirty_region_stack.clear();
        self.surface_stack.clear();
    }
}

/// Produces the frames that are sent to the renderer.
#[cfg_attr(feature = "capture", derive(Serialize))]
pub struct FrameBuilder {
    /// Records prim-header vector sizes from previous frames so the next
    /// frame can preallocate (see `preallocate_framevec` / `record_vec`).
    #[cfg_attr(feature = "capture", serde(skip))]
    prim_headers_prealloc: Preallocator,
    #[cfg_attr(feature = "capture", serde(skip))]
    composite_state_prealloc: CompositeStatePreallocator,
    /// BSP plane splitters, retained across frames to reduce allocations
    /// (reset, not dropped, each frame).
    #[cfg_attr(feature = "capture", serde(skip))]
    plane_splitters: Vec<PlaneSplitter>,
}

/// Immutable, shared context for the duration of a single frame build.
pub struct FrameBuildingContext<'a> {
    pub global_device_pixel_scale: DevicePixelScale,
    pub scene_properties: &'a SceneProperties,
    pub global_screen_world_rect: WorldRect,
    pub spatial_tree: &'a SpatialTree,
    /// A very large rect used to clamp local clips (see MAX_CLIP_COORD).
    pub max_local_clip: LayoutRect,
    pub debug_flags: DebugFlags,
    pub fb_config: &'a FrameBuilderConfig,
    pub root_spatial_node_index: SpatialNodeIndex,
}

/// Mutable state threaded through the prepare pass of frame building.
pub struct FrameBuildingState<'a> {
    pub rg_builder: &'a mut RenderTaskGraphBuilder,
    pub clip_store: &'a mut ClipStore,
    pub resource_cache: &'a mut ResourceCache,
    pub transforms: &'a mut TransformPalette,
    pub segment_builder: SegmentBuilder,
    pub surfaces: &'a mut Vec<SurfaceInfo>,
    /// Stack of dirty regions; the top is the region primitives are
    /// currently culled against (see `current_dirty_region`).
    pub dirty_region_stack: Vec<DirtyRegion>,
    pub composite_state: &'a mut CompositeState,
    pub num_visible_primitives: u32,
    pub plane_splitters: &'a mut [PlaneSplitter],
    pub surface_builder: SurfaceBuilder,
    pub cmd_buffers: &'a mut CommandBufferList,
    pub clip_tree: &'a ClipTree,
    pub frame_gpu_data: &'a mut GpuBufferBuilder,
    /// When using a render task to produce pixels that are associated with
    /// an image key (for example snapshotted pictures), inserting the image
    /// key / task id association in this hashmap allows the image item to
    /// register a dependency to the render task. This ensures that the
    /// render task is produced before the image that renders it if they
    /// are happening in the same frame.
    /// This mechanism relies on the item producing the render task to be
    /// traversed before the image that displays it (in other words, the
    /// picture must appear before the image in the display list).
    pub image_dependencies: FastHashMap<ImageKey, RenderTaskId>,
    /// Per-picture flags marking pictures already handled this pass, so
    /// snapshot pictures visited up-front are not processed twice.
    pub visited_pictures: &'a mut [bool],
}

impl<'a> FrameBuildingState<'a> {
    /// Retrieve the current dirty region during primitive traversal.
    /// Panics if no region has been pushed (callers always push a default).
    pub fn current_dirty_region(&self) -> &DirtyRegion {
        self.dirty_region_stack.last().unwrap()
    }

    /// Push a new dirty region for child primitives to cull / clip against.
    pub fn push_dirty_region(&mut self, region: DirtyRegion) {
        self.dirty_region_stack.push(region);
    }

    /// Pop the top dirty region from the stack.
    /// Panics if the stack is empty (push/pop must be balanced).
    pub fn pop_dirty_region(&mut self) {
        self.dirty_region_stack.pop().unwrap();
    }

    /// Push a primitive command to a set of command buffers
    pub fn push_prim(
        &mut self,
        cmd: &PrimitiveCommand,
        spatial_node_index: SpatialNodeIndex,
        targets: &[CommandBufferIndex],
    ) {
        for cmd_buffer_index in targets {
            let cmd_buffer = self.cmd_buffers.get_mut(*cmd_buffer_index);
            cmd_buffer.add_prim(cmd, spatial_node_index);
        }
    }

    /// Push a command to a set of command buffers
    pub fn push_cmd(
        &mut self,
        cmd: &PrimitiveCommand,
        targets: &[CommandBufferIndex],
    ) {
        for cmd_buffer_index in targets {
            let cmd_buffer = self.cmd_buffers.get_mut(*cmd_buffer_index);
            cmd_buffer.add_cmd(cmd);
        }
    }

    /// Set the active list of segments in a set of command buffers
    pub fn set_segments(
        &mut self,
        segments: &[QuadSegment],
        targets: &[CommandBufferIndex],
    ) {
        for cmd_buffer_index in targets {
            let cmd_buffer = self.cmd_buffers.get_mut(*cmd_buffer_index);
            cmd_buffer.set_segments(segments);
        }
    }
}

/// Immutable context of a picture when processing children.
#[derive(Debug)]
pub struct PictureContext {
    pub pic_index: PictureIndex,
    pub surface_spatial_node_index: SpatialNodeIndex,
    pub raster_spatial_node_index: SpatialNodeIndex,
    pub visibility_spatial_node_index: SpatialNodeIndex,
    /// The surface that this picture will render on.
    pub surface_index: SurfaceIndex,
    /// Depth of the dirty region stack when this picture was entered.
    pub dirty_region_count: usize,
    pub subpixel_mode: SubpixelMode,
}

/// Mutable state of a picture that gets modified when
/// the children are processed.
pub struct PictureState {
    // Maps layout space to this picture's space.
    pub map_local_to_pic: SpaceMapper<LayoutPixel, PicturePixel>,
    // Maps picture space to the visibility space.
    pub map_pic_to_vis: SpaceMapper<PicturePixel, VisPixel>,
}

impl FrameBuilder {
    /// Create an empty frame builder with no retained preallocation data.
    pub fn new() -> Self {
        FrameBuilder {
            prim_headers_prealloc: Preallocator::new(0),
            composite_state_prealloc: CompositeStatePreallocator::default(),
            plane_splitters: Vec::new(),
        }
    }

    /// Compute the contribution (bounding rectangles, and resources) of layers and their
    /// primitives in screen space.
    /// Runs the two main CPU passes of frame building: the visibility pass
    /// (culling + tile-cache dependency tracking) followed by the prepare
    /// pass (`prepare_picture`), for snapshot pictures first and then the
    /// top-level tile-cache pictures. Mutates `scene`, the render task
    /// graph, the composite state and the GPU buffers in place.
    fn build_layer_screen_rects_and_cull_layers(
        &mut self,
        scene: &mut BuiltScene,
        present: bool,
        global_screen_world_rect: WorldRect,
        resource_cache: &mut ResourceCache,
        rg_builder: &mut RenderTaskGraphBuilder,
        global_device_pixel_scale: DevicePixelScale,
        scene_properties: &SceneProperties,
        transform_palette: &mut TransformPalette,
        data_stores: &mut DataStores,
        scratch: &mut ScratchBuffer,
        debug_flags: DebugFlags,
        composite_state: &mut CompositeState,
        tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
        spatial_tree: &SpatialTree,
        cmd_buffers: &mut CommandBufferList,
        frame_gpu_data: &mut GpuBufferBuilder,
        frame_memory: &FrameMemory,
        profile: &mut TransactionProfile,
    ) {
        profile_scope!("build_layer_screen_rects_and_cull_layers");

        // When not presenting, tile-cache slices are skipped entirely
        // (snapshot pictures are still processed below).
        let render_picture_cache_slices = present;

        let root_spatial_node_index = spatial_tree.root_reference_frame_index();

        const MAX_CLIP_COORD: f32 = 1.0e9;

        // Reset all plane splitters. These are retained from frame to frame to reduce
        // per-frame allocations.
        self.plane_splitters.resize_with(scene.num_plane_splitters, BspSplitter::new);
        for splitter in &mut self.plane_splitters {
            splitter.reset();
        }

        let frame_context = FrameBuildingContext {
            global_device_pixel_scale,
            scene_properties,
            global_screen_world_rect,
            spatial_tree,
            max_local_clip: LayoutRect {
                min: LayoutPoint::new(-MAX_CLIP_COORD, -MAX_CLIP_COORD),
                max: LayoutPoint::new(MAX_CLIP_COORD, MAX_CLIP_COORD),
            },
            debug_flags,
            fb_config: &scene.config,
            root_spatial_node_index,
        };

        scene.picture_graph.build_update_passes(
            &mut scene.prim_store.pictures,
            &frame_context,
        );

        scene.picture_graph.assign_surfaces(
            &mut scene.prim_store.pictures,
            &mut scene.surfaces,
            tile_caches,
            &frame_context,
        );

        // Add a "fake" surface that we will use as parent for
        // snapshotted pictures.
        let root_spatial_node = frame_context.spatial_tree.root_reference_frame_index();
        let snapshot_surface = SurfaceIndex(scene.surfaces.len());
        scene.surfaces.push(SurfaceInfo::new(
            root_spatial_node,
            root_spatial_node,
            WorldRect::max_rect(),
            &frame_context.spatial_tree,
            euclid::Scale::new(1.0),
            (1.0, 1.0),
            (1.0, 1.0),
            false,
            false,
        ));

        scene.picture_graph.propagate_bounding_rects(
            &mut scene.prim_store.pictures,
            &mut scene.surfaces,
            &frame_context,
        );

        // In order to handle picture snapshots consistently we need
        // the visibility and prepare passes to visit them first before
        // traversing the scene. This ensures that out-of-view snapshots
        // are rendered and that snapshots are consistently produced
        // relative to the root spatial node.
        // However it means that the visibility and prepare passes may
        // visit some pictures multiple times, so we keep track of visited
        // pictures during each traversal to avoid that.
        let n_pics = scene.prim_store.pictures.len();
        let mut visited_pictures = frame_memory.new_vec_with_capacity(n_pics);
        for _ in 0..n_pics {
            visited_pictures.push(false);
        }

        {
            profile_scope!("UpdateVisibility");
            profile_marker!("UpdateVisibility");
            profile.start_time(profiler::FRAME_VISIBILITY_TIME);

            let visibility_context = FrameVisibilityContext {
                global_device_pixel_scale,
                spatial_tree,
                global_screen_world_rect,
                debug_flags,
                scene_properties,
                config: scene.config,
                root_spatial_node_index,
            };

            // Visibility pass for snapshot pictures (always run, even when
            // not presenting).
            for pic_index in scene.snapshot_pictures.iter() {
                let mut visibility_state = FrameVisibilityState {
                    clip_store: &mut scene.clip_store,
                    resource_cache,
                    frame_gpu_data,
                    data_stores,
                    clip_tree: &mut scene.clip_tree,
                    composite_state,
                    rg_builder,
                    prim_instances: &mut scene.prim_instances,
                    surfaces: &mut scene.surfaces,
                    surface_stack: scratch.frame.surface_stack.take(),
                    profile,
                    scratch,
                    visited_pictures: &mut visited_pictures,
                };

                // Snapshots are never culled against the screen.
                let world_culling_rect = WorldRect::max_rect();

                // For now, snapshots are updated every frame. For the
                // pictures displaying the snapshot via images to pick up
                // the changes, we have to make sure that the image's
                // generation counter is incremented early in the frame,
                // before the main visibility pass visits the image items.
                let pic = &scene.prim_store.pictures[pic_index.0];
                // Every picture in snapshot_pictures is expected to carry
                // snapshot data; a missing entry is a scene-building bug.
                let snapshot = pic.snapshot
                    .unwrap();
                let key = snapshot.key.as_image();
                visibility_state.resource_cache
                    .increment_image_generation(key);

                if let Some(node) = pic.clip_root {
                    visibility_state.clip_tree.push_clip_root_node(node);
                }
                update_prim_visibility(
                    *pic_index,
                    None,
                    &world_culling_rect,
                    &scene.prim_store,
                    true,
                    &visibility_context,
                    &mut visibility_state,
                    &mut None,
                );
                // Re-read clip_root rather than reusing `pic`, since the
                // pictures vec may have been reborrowed above.
                if scene.prim_store.pictures[pic_index.0].clip_root.is_some() {
                    visibility_state.clip_tree.pop_clip_root();
                }
            }

            // Visibility pass for the top-level tile-cache slices, back to
            // front is `.rev()` — NOTE(review): presumably so occlusion /
            // dependency info accumulates correctly; confirm in tile_cache.rs.
            for pic_index in scene.tile_cache_pictures.iter().rev() {
                if !render_picture_cache_slices {
                    break;
                }
                let pic = &mut scene.prim_store.pictures[pic_index.0];

                match pic.raster_config {
                    Some(RasterConfig { surface_index, composite_mode: PictureCompositeMode::TileCache { slice_id }, .. }) => {
                        let tile_cache = tile_caches
                            .get_mut(&slice_id)
                            .expect("bug: non-existent tile cache");

                        let mut visibility_state = FrameVisibilityState {
                            clip_store: &mut scene.clip_store,
                            resource_cache,
                            frame_gpu_data,
                            data_stores,
                            clip_tree: &mut scene.clip_tree,
                            composite_state,
                            rg_builder,
                            prim_instances: &mut scene.prim_instances,
                            surfaces: &mut scene.surfaces,
                            surface_stack: scratch.frame.surface_stack.take(),
                            profile,
                            scratch,
                            visited_pictures: &mut visited_pictures,
                        };

                        // If we have a tile cache for this picture, see if any of the
                        // relative transforms have changed, which means we need to
                        // re-map the dependencies of any child primitives.
                        let world_culling_rect = tile_cache.pre_update(
                            surface_index,
                            &visibility_context,
                            &mut visibility_state,
                        );

                        // Push a new surface, supplying the list of clips that should be
                        // ignored, since they are handled by clipping when drawing this surface.
                        visibility_state.push_surface(
                            *pic_index,
                            surface_index,
                        );
                        visibility_state.clip_tree.push_clip_root_node(tile_cache.shared_clip_node_id);

                        update_prim_visibility(
                            *pic_index,
                            None,
                            &world_culling_rect,
                            &scene.prim_store,
                            true,
                            &visibility_context,
                            &mut visibility_state,
                            &mut Some(tile_cache),
                        );

                        // Build the dirty region(s) for this tile cache.
                        tile_cache.post_update(
                            &visibility_context,
                            &mut visibility_state.composite_state,
                            &mut visibility_state.resource_cache,
                        );

                        visibility_state.clip_tree.pop_clip_root();
                        visibility_state.pop_surface();
                        // Return the surface stack to the frame scratch
                        // buffer so its allocation is reused next iteration.
                        visibility_state.scratch.frame.surface_stack = visibility_state.surface_stack.take();
                    }
                    _ => {
                        panic!("bug: not a tile cache");
                    }
                }
            }

            profile.end_time(profiler::FRAME_VISIBILITY_TIME);
        }

        profile.start_time(profiler::FRAME_PREPARE_TIME);

        // Reset the visited pictures for the prepare pass.
        visited_pictures.clear();
        for _ in 0..n_pics {
            visited_pictures.push(false);
        }
        let mut frame_state = FrameBuildingState {
            rg_builder,
            clip_store: &mut scene.clip_store,
            resource_cache,
            transforms: transform_palette,
            segment_builder: SegmentBuilder::new(),
            surfaces: &mut scene.surfaces,
            dirty_region_stack: scratch.frame.dirty_region_stack.take(),
            composite_state,
            num_visible_primitives: 0,
            plane_splitters: &mut self.plane_splitters,
            surface_builder: SurfaceBuilder::new(),
            cmd_buffers,
            clip_tree: &mut scene.clip_tree,
            frame_gpu_data,
            image_dependencies: FastHashMap::default(),
            visited_pictures: &mut visited_pictures,
        };

        // Prepare pass for snapshot pictures, rooted at the fake
        // snapshot surface pushed earlier.
        if !scene.snapshot_pictures.is_empty() {
            // Push a default dirty region which does not cull any
            // primitive.
            let mut default_dirty_region = DirtyRegion::new(
                root_spatial_node_index,
                root_spatial_node_index,
            );
            default_dirty_region.add_dirty_region(
                PictureRect::max_rect(),
                frame_context.spatial_tree,
            );
            frame_state.push_dirty_region(default_dirty_region);

            frame_state.surface_builder.push_surface(
                snapshot_surface,
                false,
                PictureRect::max_rect(),
                None,
                frame_state.surfaces,
                frame_state.rg_builder,
            );
        }

        for pic_index in &scene.snapshot_pictures {
            prepare_picture(
                *pic_index,
                &mut scene.prim_store,
                Some(snapshot_surface),
                SubpixelMode::Allow,
                &frame_context,
                &mut frame_state,
                data_stores,
                &mut scratch.primitive,
                tile_caches,
                &mut scene.prim_instances
            );
        }

        if !scene.snapshot_pictures.is_empty() {
            frame_state.surface_builder.pop_empty_surface();
            frame_state.pop_dirty_region();
        }

        // Push a default dirty region which culls primitives
        // against the screen world rect, in absence of any
        // other dirty regions.
        let mut default_dirty_region = DirtyRegion::new(
            root_spatial_node_index,
            root_spatial_node_index,
        );
        default_dirty_region.add_dirty_region(
            frame_context.global_screen_world_rect.cast_unit(),
            frame_context.spatial_tree,
        );
        frame_state.push_dirty_region(default_dirty_region);

        // Prepare pass for the top-level tile-cache pictures (skipped
        // entirely when not presenting).
        for pic_index in &scene.tile_cache_pictures {
            if !render_picture_cache_slices {
                break;
            }

            prepare_picture(
                *pic_index,
                &mut scene.prim_store,
                None,
                SubpixelMode::Allow,
                &frame_context,
                &mut frame_state,
                data_stores,
                &mut scratch.primitive,
                tile_caches,
                &mut scene.prim_instances
            );
        }

        frame_state.pop_dirty_region();
        frame_state.surface_builder.finalize();
        profile.end_time(profiler::FRAME_PREPARE_TIME);
        profile.set(profiler::VISIBLE_PRIMITIVES, frame_state.num_visible_primitives);

        // Hand the dirty region stack (and its allocation) back to scratch.
        scratch.frame.dirty_region_stack = frame_state.dirty_region_stack.take();

        {
            profile_marker!("BlockOnResources");

            resource_cache.block_until_all_resources_added(
                frame_gpu_data,
                profile,
            );
        }
    }

    /// Build a complete `Frame` for the current scene: runs visibility and
    /// prepare passes, batches render passes, and (when `present` is true)
    /// builds the composite pass. The returned frame owns its allocator
    /// memory (`frame_memory`) and all per-frame GPU buffers.
    pub fn build(
        &mut self,
        scene: &mut BuiltScene,
        present: bool,
        resource_cache: &mut ResourceCache,
        rg_builder: &mut RenderTaskGraphBuilder,
        stamp: FrameStamp,
        device_origin: DeviceIntPoint,
        scene_properties: &SceneProperties,
        data_stores: &mut DataStores,
        scratch: &mut ScratchBuffer,
        debug_flags: DebugFlags,
        tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
        spatial_tree: &mut SpatialTree,
        dirty_rects_are_valid: bool,
        profile: &mut TransactionProfile,
        minimap_data: FastHashMap<ExternalScrollId, MinimapData>,
        chunk_pool: Arc<ChunkPool>,
    ) -> Frame {
        profile_scope!("build");
        profile_marker!("BuildFrame");

        let mut frame_memory = FrameMemory::new(chunk_pool, stamp.frame_id());
        // TODO(gw): Recycle backing vec buffers for gpu buffer builder between frames
        let mut gpu_buffer_builder = GpuBufferBuilder {
            f32: GpuBufferBuilderF::new(&frame_memory, 8 * 1024, stamp.frame_id()),
            i32: GpuBufferBuilderI::new(&frame_memory, 2 * 1024, stamp.frame_id()),
        };

        profile.set(profiler::PRIMITIVES, scene.prim_instances.len());
        profile.set(profiler::PICTURE_CACHE_SLICES, scene.tile_cache_config.picture_cache_slice_count);
        scratch.begin_frame();
        resource_cache.begin_frame(stamp, profile);

        // TODO(gw): Follow up patches won't clear this, as they'll be assigned
        //           statically during scene building.
        scene.surfaces.clear();

        // Write the per-frame default GPU blocks (white image, black rect).
        let globals = FrameGlobalResources::new(&mut gpu_buffer_builder);

        spatial_tree.update_tree(scene_properties);
        let mut transform_palette = spatial_tree.build_transform_palette(&frame_memory);
        scene.clip_store.begin_frame(&mut scratch.clip_store);

        rg_builder.begin_frame(stamp.frame_id());

        // TODO(dp): Remove me completely!!
        let global_device_pixel_scale = DevicePixelScale::new(1.0);

        let output_size = scene.output_rect.size();
        let screen_world_rect = (scene.output_rect.to_f32() / global_device_pixel_scale).round_out();

        let mut composite_state = CompositeState::new(
            scene.config.compositor_kind,
            scene.config.max_depth_ids,
            dirty_rects_are_valid,
            scene.config.low_quality_pinch_zoom,
            &frame_memory,
        );

        // Size composite-state vectors from last frame's recorded sizes.
        self.composite_state_prealloc.preallocate(&mut composite_state);

        let mut cmd_buffers = CommandBufferList::new();

        // Run the visibility + prepare passes (the bulk of CPU frame work).
        self.build_layer_screen_rects_and_cull_layers(
            scene,
            present,
            screen_world_rect,
            resource_cache,
            rg_builder,
            global_device_pixel_scale,
            scene_properties,
            &mut transform_palette,
            data_stores,
            scratch,
            debug_flags,
            &mut composite_state,
            tile_caches,
            spatial_tree,
            &mut cmd_buffers,
            &mut gpu_buffer_builder,
            &frame_memory,
            profile,
        );

        // Emit minimap debug rects (no-op when minimap_data is empty).
        self.render_minimap(&mut scratch.primitive, &spatial_tree, minimap_data);

        profile.start_time(profiler::FRAME_BATCHING_TIME);

        let mut deferred_resolves = frame_memory.new_vec();

        // Finish creating the frame graph and build it.
        let render_tasks = rg_builder.end_frame(
            resource_cache,
            &mut gpu_buffer_builder,
            &mut deferred_resolves,
            scene.config.max_shared_surface_size,
            &frame_memory,
        );

        let mut passes = frame_memory.new_vec();
        let mut has_texture_cache_tasks = false;
        let mut prim_headers = PrimitiveHeaders::new(&frame_memory);
        self.prim_headers_prealloc.preallocate_framevec(&mut prim_headers.headers_int);
        self.prim_headers_prealloc.preallocate_framevec(&mut prim_headers.headers_float);

        {
            profile_marker!("Batching");

            // Used to generate a unique z-buffer value per primitive.
            let mut z_generator = ZBufferIdGenerator::new(scene.config.max_depth_ids);
            let use_dual_source_blending = scene.config.dual_source_blending_is_supported;

            // Build each pass in reverse (dependencies before dependents).
            for pass in render_tasks.passes.iter().rev() {
                let mut ctx = RenderTargetContext {
                    global_device_pixel_scale,
                    prim_store: &scene.prim_store,
                    clip_store: &scene.clip_store,
                    resource_cache,
                    use_dual_source_blending,
                    use_advanced_blending: scene.config.gpu_supports_advanced_blend,
                    break_advanced_blend_batches: !scene.config.advanced_blend_is_coherent,
                    batch_lookback_count: scene.config.batch_lookback_count,
                    spatial_tree,
                    data_stores,
                    surfaces: &scene.surfaces,
                    scratch: &mut scratch.primitive,
                    screen_world_rect,
                    globals: &globals,
                    tile_caches,
                    root_spatial_node_index: spatial_tree.root_reference_frame_index(),
                    frame_memory: &mut frame_memory,
                };

                let pass = build_render_pass(
                    pass,
                    output_size,
                    &mut ctx,
                    &mut gpu_buffer_builder,
                    &render_tasks,
                    &scene.clip_store,
                    &mut transform_palette,
                    &mut prim_headers,
                    &mut z_generator,
                    scene.config.gpu_supports_fast_clears,
                    &scene.prim_instances,
                    &cmd_buffers,
                );

                has_texture_cache_tasks |= !pass.texture_cache.is_empty();
                has_texture_cache_tasks |= !pass.picture_cache.is_empty();

                passes.push(pass);
            }

            // Only build the composite pass when this frame will actually
            // be presented.
            if present {
                let mut ctx = RenderTargetContext {
                    global_device_pixel_scale,
                    clip_store: &scene.clip_store,
                    prim_store: &scene.prim_store,
                    resource_cache,
                    use_dual_source_blending,
                    use_advanced_blending: scene.config.gpu_supports_advanced_blend,
                    break_advanced_blend_batches: !scene.config.advanced_blend_is_coherent,
                    batch_lookback_count: scene.config.batch_lookback_count,
                    spatial_tree,
                    data_stores,
                    surfaces: &scene.surfaces,
                    scratch: &mut scratch.primitive,
                    screen_world_rect,
                    globals: &globals,
                    tile_caches,
                    root_spatial_node_index: spatial_tree.root_reference_frame_index(),
                    frame_memory: &mut frame_memory,
                };

                self.build_composite_pass(
                    scene,
                    &mut ctx,
                    &mut gpu_buffer_builder,
                    &mut deferred_resolves,
                    &mut composite_state,
                );
            }
        }

        profile.end_time(profiler::FRAME_BATCHING_TIME);

        resource_cache.end_frame(profile);

        // Record this frame's sizes so the next frame can preallocate.
        self.prim_headers_prealloc.record_vec(&prim_headers.headers_int);
        self.composite_state_prealloc.record(&composite_state);

        composite_state.end_frame();
        scene.clip_store.end_frame(&mut scratch.clip_store);
        scratch.end_frame();

        let gpu_buffer_f = gpu_buffer_builder.f32.finalize(&render_tasks);
        let gpu_buffer_i = gpu_buffer_builder.i32.finalize(&render_tasks);

        Frame {
            device_rect: DeviceIntRect::from_origin_and_size(
                device_origin,
                scene.output_rect.size(),
            ),
            present,
            passes,
            transform_palette: transform_palette.finish(),
            render_tasks,
            deferred_resolves,
            has_been_rendered: false,
            has_texture_cache_tasks,
            prim_headers,
            // Move the debug items out of scratch so they live with the frame.
            debug_items: mem::replace(&mut
scratch.primitive.debug_items, Vec::new()), 840 composite_state, 841 gpu_buffer_f, 842 gpu_buffer_i, 843 allocator_memory: frame_memory, 844 } 845 } 846 847 fn render_minimap( 848 &self, 849 scratch: &mut PrimitiveScratchBuffer, 850 spatial_tree: &SpatialTree, 851 minimap_data_store: FastHashMap<ExternalScrollId, MinimapData>) { 852 // TODO: Replace minimap_data_store with Option<FastHastMap>? 853 if minimap_data_store.is_empty() { 854 return 855 } 856 857 // In our main walk over the spatial tree (below), for nodes inside a 858 // subtree rooted at a root-content node, we need some information from 859 // that enclosing root-content node. To collect this information, do an 860 // preliminary walk over the spatial tree now and collect the root-content 861 // info in a HashMap. 862 struct RootContentInfo { 863 transform: LayoutToWorldTransform, 864 clip: LayoutRect 865 } 866 let mut root_content_info = FastHashMap::<ExternalScrollId, RootContentInfo>::default(); 867 spatial_tree.visit_nodes(|index, node| { 868 if let SpatialNodeType::ScrollFrame(ref scroll_frame_info) = node.node_type { 869 if let Some(minimap_data) = minimap_data_store.get(&scroll_frame_info.external_id) { 870 if minimap_data.is_root_content { 871 let transform = spatial_tree.get_world_viewport_transform(index).into_transform(); 872 root_content_info.insert(scroll_frame_info.external_id, RootContentInfo{ 873 transform, 874 clip: scroll_frame_info.viewport_rect 875 }); 876 } 877 } 878 } 879 }); 880 881 // This is the main walk over the spatial tree. For every scroll frame node which 882 // has minimap data, compute the rects we want to render for that minimap in world 883 // coordinates and add them to `scratch.debug_items`. 
884 spatial_tree.visit_nodes(|index, node| { 885 if let SpatialNodeType::ScrollFrame(ref scroll_frame_info) = node.node_type { 886 if let Some(minimap_data) = minimap_data_store.get(&scroll_frame_info.external_id) { 887 const HORIZONTAL_PADDING: f32 = 5.0; 888 const VERTICAL_PADDING: f32 = 10.0; 889 const PAGE_BORDER_COLOR: ColorF = debug_colors::BLACK; 890 const BACKGROUND_COLOR: ColorF = ColorF { r: 0.3, g: 0.3, b: 0.3, a: 0.3}; 891 const DISPLAYPORT_BACKGROUND_COLOR: ColorF = ColorF { r: 1.0, g: 1.0, b: 1.0, a: 0.4}; 892 const LAYOUT_PORT_COLOR: ColorF = debug_colors::RED; 893 const VISUAL_PORT_COLOR: ColorF = debug_colors::BLUE; 894 const DISPLAYPORT_COLOR: ColorF = debug_colors::LIME; 895 896 let viewport = scroll_frame_info.viewport_rect; 897 898 // Scale the minimap to make it 100px wide (if there's space), and the full height 899 // of the scroll frame's viewport, minus some padding. Position it at the left edge 900 // of the scroll frame's viewport. 901 let scale_factor_x = 100f32.min(viewport.width() - (2.0 * HORIZONTAL_PADDING)) 902 / minimap_data.scrollable_rect.width(); 903 let scale_factor_y = (viewport.height() - (2.0 * VERTICAL_PADDING)) 904 / minimap_data.scrollable_rect.height(); 905 if scale_factor_x <= 0.0 || scale_factor_y <= 0.0 { 906 return; 907 } 908 let transform = LayoutTransform::scale(scale_factor_x, scale_factor_y, 1.0) 909 .then_translate(LayoutVector3D::new(HORIZONTAL_PADDING, VERTICAL_PADDING, 0.0)) 910 .then_translate(LayoutVector3D::new(viewport.min.x, viewport.min.y, 0.0)); 911 912 // Transforms for transforming rects in this scroll frame's local coordintes, to world coordinates. 913 // For scroll frames inside a root-content subtree, we apply this transform in two parts 914 // (local to root-content, and root-content to world), so that we can make additional 915 // adjustments in root-content space. For scroll frames outside of a root-content subtree, 916 // the entire world transform will be in `local_to_root_content`. 
917 let world_transform = spatial_tree 918 .get_world_viewport_transform(index) 919 .into_transform(); 920 let mut local_to_root_content = 921 world_transform.with_destination::<LayoutPixel>(); 922 let mut root_content_to_world = LayoutToWorldTransform::default(); 923 let mut root_content_clip = None; 924 if minimap_data.root_content_scroll_id != 0 { 925 if let Some(RootContentInfo{transform: root_content_transform, clip}) = root_content_info.get(&ExternalScrollId(minimap_data.root_content_scroll_id, minimap_data.root_content_pipeline_id)) { 926 // Exclude the root-content node's zoom transform from `local_to_root_content`. 927 // This ensures that the minimap remains unaffected by pinch-zooming 928 // (in essence, remaining attached to the *visual* viewport, rather than to 929 // the *layout* viewport which is what happens by default). 930 let zoom_transform = minimap_data.zoom_transform; 931 local_to_root_content = world_transform 932 .then(&root_content_transform.inverse().unwrap()) 933 .then(&zoom_transform.inverse().unwrap()); 934 root_content_to_world = root_content_transform.clone(); 935 root_content_clip = Some(clip); 936 } 937 } 938 939 let mut add_rect = |rect, border, fill| -> Option<()> { 940 const STROKE_WIDTH: f32 = 2.0; 941 // Place rect in scroll frame's local coordinate space 942 let transformed_rect = transform.outer_transformed_box2d(&rect)?; 943 944 // Transform to world coordinates, using root-content coords as an intermediate step. 945 let mut root_content_rect = local_to_root_content.outer_transformed_box2d(&transformed_rect)?; 946 // In root-content coords, apply the root content node's viewport clip. 947 // This prevents subframe minimaps from leaking into the chrome area when the root 948 // scroll frame is scrolled. 949 // TODO: The minimaps of nested subframes can still leak outside of the viewports of 950 // their containing subframes. Should have a more proper fix for this. 
                        if let Some(clip) = root_content_clip {
                            root_content_rect = root_content_rect.intersection(clip)?;
                        }
                        let world_rect = root_content_to_world.outer_transformed_box2d(&root_content_rect)?;

                        scratch.push_debug_rect_with_stroke_width(world_rect, border, STROKE_WIDTH);

                        // Add world coordinate rects to scratch.debug_items
                        if let Some(fill_color) = fill {
                            // Inset the fill by the stroke width so the fill does
                            // not overlap the border stroke drawn above.
                            let interior_world_rect = WorldRect::new(
                                world_rect.min + WorldVector2D::new(STROKE_WIDTH, STROKE_WIDTH),
                                world_rect.max - WorldVector2D::new(STROKE_WIDTH, STROKE_WIDTH)
                            );
                            scratch.push_debug_rect(interior_world_rect * DevicePixelScale::new(1.0), 1, border, fill_color);
                        }

                        Some(())
                    };

                    // Draw back-to-front: page background, displayport, then the
                    // viewport outlines.
                    add_rect(minimap_data.scrollable_rect, PAGE_BORDER_COLOR, Some(BACKGROUND_COLOR));
                    add_rect(minimap_data.displayport, DISPLAYPORT_COLOR, Some(DISPLAYPORT_BACKGROUND_COLOR));
                    // Only render a distinct layout viewport for the root content.
                    // For other scroll frames, the visual and layout viewports coincide.
                    if minimap_data.is_root_content {
                        add_rect(minimap_data.layout_viewport, LAYOUT_PORT_COLOR, None);
                    }
                    add_rect(minimap_data.visual_viewport, VISUAL_PORT_COLOR, None);
                }
            }
        });
    }

    /// Adds each top-level tile cache picture's surface to the composite
    /// state, so the renderer (or an OS compositor) can composite the tiles
    /// during present.
    ///
    /// Panics if a top-level picture is not a tile cache - that would be a
    /// scene-building bug.
    fn build_composite_pass(
        &self,
        scene: &BuiltScene,
        ctx: &RenderTargetContext,
        gpu_buffers: &mut GpuBufferBuilder,
        deferred_resolves: &mut FrameVec<DeferredResolve>,
        composite_state: &mut CompositeState,
    ) {
        for pic_index in &scene.tile_cache_pictures {
            let pic = &ctx.prim_store.pictures[pic_index.0];

            match pic.raster_config {
                Some(RasterConfig { composite_mode: PictureCompositeMode::TileCache { slice_id }, .. }) => {
                    // Tile cache instances are added to the composite config, rather than
                    // directly added to batches. This allows them to be drawn with various
                    // present modes during render, such as partial present etc.
                    let tile_cache = &ctx.tile_caches[&slice_id];
                    let map_local_to_world = SpaceMapper::new_with_target(
                        ctx.root_spatial_node_index,
                        tile_cache.spatial_node_index,
                        ctx.screen_world_rect,
                        ctx.spatial_tree,
                    );
                    // Map the tile cache's local clip rect into (rounded) device
                    // space, which is what compositing works in.
                    let world_clip_rect = map_local_to_world
                        .map(&tile_cache.local_clip_rect)
                        .expect("bug: unable to map clip rect");
                    let device_clip_rect = (world_clip_rect * ctx.global_device_pixel_scale).round();

                    composite_state.push_surface(
                        tile_cache,
                        device_clip_rect,
                        ctx.resource_cache,
                        &mut gpu_buffers.f32,
                        deferred_resolves,
                    );
                }
                _ => {
                    panic!("bug: found a top-level prim that isn't a tile cache");
                }
            }
        }
    }
}

/// Processes this pass to prepare it for rendering.
///
/// Among other things, this allocates output regions for each of our tasks
/// (added via `add_render_task`) in a RenderTarget and assigns it into that
/// target.
pub fn build_render_pass(
    src_pass: &Pass,
    screen_size: DeviceIntSize,
    ctx: &mut RenderTargetContext,
    gpu_buffer_builder: &mut GpuBufferBuilder,
    render_tasks: &RenderTaskGraph,
    clip_store: &ClipStore,
    transforms: &mut TransformPalette,
    prim_headers: &mut PrimitiveHeaders,
    z_generator: &mut ZBufferIdGenerator,
    gpu_supports_fast_clears: bool,
    prim_instances: &[PrimitiveInstance],
    cmd_buffers: &CommandBufferList,
) -> RenderPass {
    profile_scope!("build_render_pass");

    // TODO(gw): In this initial frame graph work, we try to maintain the existing
    //           build_render_pass code as closely as possible, to make the review
    //           simpler and reduce chance of regressions. However, future work should
    //           include refactoring this to more closely match the built frame graph.
    let mut pass = RenderPass::new(src_pass, ctx.frame_memory);

    // Assign each sub-pass's tasks to a render target, based on the kind of
    // surface the sub-pass draws to.
    for sub_pass in &src_pass.sub_passes {
        match sub_pass.surface {
            // Dynamically-allocated intermediate targets: create a new color
            // or alpha target and add each of the sub-pass's tasks to it.
            SubPassSurface::Dynamic { target_kind, texture_id, used_rect } => {
                match target_kind {
                    RenderTargetKind::Color => {
                        let mut target = RenderTarget::new(
                            RenderTargetKind::Color,
                            false,
                            texture_id,
                            screen_size,
                            gpu_supports_fast_clears,
                            Some(used_rect),
                            &ctx.frame_memory,
                        );

                        for task_id in &sub_pass.task_ids {
                            target.add_task(
                                *task_id,
                                ctx,
                                gpu_buffer_builder,
                                render_tasks,
                                clip_store,
                                transforms,
                            );
                        }

                        pass.color.targets.push(target);
                    }
                    RenderTargetKind::Alpha => {
                        let mut target = RenderTarget::new(
                            RenderTargetKind::Alpha,
                            false,
                            texture_id,
                            screen_size,
                            gpu_supports_fast_clears,
                            Some(used_rect),
                            &ctx.frame_memory,
                        );

                        for task_id in &sub_pass.task_ids {
                            target.add_task(
                                *task_id,
                                ctx,
                                gpu_buffer_builder,
                                render_tasks,
                                clip_store,
                                transforms,
                            );
                        }

                        pass.alpha.targets.push(target);
                    }
                }
            }
            // Persistent picture-cache tile surfaces: batch the tile's command
            // buffer (or set up a blit) into a picture cache target.
            SubPassSurface::Persistent { surface: StaticRenderTaskSurface::PictureCache { ref surface, .. }, .. } => {
                // A picture cache sub-pass always contains exactly one task.
                assert_eq!(sub_pass.task_ids.len(), 1);
                let task_id = sub_pass.task_ids[0];
                let task = &render_tasks[task_id];
                let target_rect = task.get_target_rect();

                match task.kind {
                    RenderTaskKind::Picture(ref pic_task) => {
                        let cmd_buffer = cmd_buffers.get(pic_task.cmd_buffer_index);
                        let mut dirty_rect = pic_task.scissor_rect.expect("bug: must be set for cache tasks");
                        let mut valid_rect = pic_task.valid_rect.expect("bug: must be set for cache tasks");

                        // If we have a surface size, clip the dirty and valid rects
                        // to that size. This ensures that native compositors will
                        // pass sanity checks (Bug 1971296).
                        if let ResolvedSurfaceTexture::Native { size, .. } = surface {
                            let surface_size_rect = <DeviceIntRect>::from_size(*size);
                            dirty_rect = dirty_rect.intersection(&surface_size_rect).unwrap_or_default();
                            valid_rect = valid_rect.intersection(&surface_size_rect).unwrap_or_default();
                        }

                        let batcher = AlphaBatchBuilder::new(
                            screen_size,
                            ctx.break_advanced_blend_batches,
                            ctx.batch_lookback_count,
                            task_id,
                            task_id.into(),
                            &ctx.frame_memory,
                        );

                        let mut batch_builder = BatchBuilder::new(batcher);

                        // Turn each primitive command recorded for this tile
                        // into draw batches.
                        cmd_buffer.iter_prims(&mut |cmd, spatial_node_index, segments| {
                            batch_builder.add_prim_to_batch(
                                cmd,
                                spatial_node_index,
                                ctx,
                                render_tasks,
                                prim_headers,
                                transforms,
                                pic_task.raster_spatial_node_index,
                                pic_task.surface_spatial_node_index,
                                z_generator,
                                prim_instances,
                                gpu_buffer_builder,
                                segments,
                            );
                        });

                        let batcher = batch_builder.finalize();

                        let mut batch_containers = ctx.frame_memory.new_vec();
                        let mut alpha_batch_container = AlphaBatchContainer::new(
                            Some(dirty_rect),
                            &ctx.frame_memory
                        );

                        batcher.build(
                            &mut batch_containers,
                            &mut alpha_batch_container,
                            target_rect,
                            None,
                        );
                        // Everything should have landed in the single alpha
                        // batch container passed above.
                        debug_assert!(batch_containers.is_empty());

                        let target = PictureCacheTarget {
                            surface: surface.clone(),
                            clear_color: pic_task.clear_color,
                            kind: PictureCacheTargetKind::Draw {
                                alpha_batch_container,
                            },
                            dirty_rect,
                            valid_rect,
                        };

                        pass.picture_cache.push(target);
                    }
                    RenderTaskKind::TileComposite(ref tile_task) => {
                        let mut dirty_rect = tile_task.scissor_rect;
                        let mut valid_rect = tile_task.valid_rect;
                        // If we have a surface size, clip the dirty and valid rects
                        // to that size. This ensures that native compositors will
                        // pass sanity checks (Bug 1971296).
                        if let ResolvedSurfaceTexture::Native { size, .. } = surface {
                            let surface_size_rect = <DeviceIntRect>::from_size(*size);
                            dirty_rect = dirty_rect.intersection(&surface_size_rect).unwrap_or_default();
                            valid_rect = valid_rect.intersection(&surface_size_rect).unwrap_or_default();
                        }

                        // Tile composites are a blit from an existing source
                        // task rather than a fresh draw.
                        let target = PictureCacheTarget {
                            surface: surface.clone(),
                            clear_color: Some(tile_task.clear_color),
                            kind: PictureCacheTargetKind::Blit {
                                task_id: tile_task.task_id.expect("bug: no source task_id set"),
                                sub_rect_offset: tile_task.sub_rect_offset,
                            },
                            dirty_rect,
                            valid_rect,
                        };

                        pass.picture_cache.push(target);
                    }
                    _ => {
                        // Only picture and tile-composite tasks may target a
                        // picture cache surface.
                        unreachable!();
                    }
                };
            }
            // Persistent texture-cache surfaces: tasks writing to the same
            // texture share a single render target, created on first use.
            SubPassSurface::Persistent { surface: StaticRenderTaskSurface::TextureCache { target_kind, texture, .. } } => {
                let texture = pass.texture_cache
                    .entry(texture)
                    .or_insert_with(||
                        RenderTarget::new(
                            target_kind,
                            true,
                            texture,
                            screen_size,
                            gpu_supports_fast_clears,
                            None,
                            &ctx.frame_memory
                        )
                    );
                for task_id in &sub_pass.task_ids {
                    texture.add_task(
                        *task_id,
                        ctx,
                        gpu_buffer_builder,
                        render_tasks,
                        clip_store,
                        transforms,
                    );
                }
            }
            SubPassSurface::Persistent { surface: StaticRenderTaskSurface::ReadOnly { .. } } => {
                panic!("Should not create a render pass for read-only task locations.");
            }
        }
    }

    // With all tasks assigned, build the per-target instance data / batches.
    pass.color.build(
        ctx,
        render_tasks,
        prim_headers,
        transforms,
        z_generator,
        prim_instances,
        cmd_buffers,
        gpu_buffer_builder,
    );
    pass.alpha.build(
        ctx,
        render_tasks,
        prim_headers,
        transforms,
        z_generator,
        prim_instances,
        cmd_buffers,
        gpu_buffer_builder,
    );

    for target in &mut pass.texture_cache.values_mut() {
        target.build(
            ctx,
            render_tasks,
            prim_headers,
            transforms,
            z_generator,
            prim_instances,
            cmd_buffers,
            gpu_buffer_builder,
        );
    }

    pass
}

/// A rendering-oriented representation of the frame built by the render backend
/// and presented to the renderer.
///
/// # Safety
///
/// The frame's allocator memory must be dropped after all of the frame's containers.
/// This is handled in the renderer and in `RenderedDocument`'s Drop implementation.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct Frame {
    /// The rectangle to show the frame in, on screen.
    pub device_rect: DeviceIntRect,
    pub present: bool,
    /// The render passes that make up this frame, in execution order.
    pub passes: FrameVec<RenderPass>,

    pub transform_palette: FrameVec<TransformData>,
    pub render_tasks: RenderTaskGraph,
    pub prim_headers: PrimitiveHeaders,

    /// List of textures that we don't know about yet
    /// from the backend thread. The render thread
    /// will use a callback to resolve these and
    /// patch the data structures.
    pub deferred_resolves: FrameVec<DeferredResolve>,

    /// True if this frame contains any render tasks
    /// that write to the texture cache.
    pub has_texture_cache_tasks: bool,

    /// True if this frame has been drawn by the
    /// renderer.
1314 pub has_been_rendered: bool, 1315 1316 /// Debugging information to overlay for this frame. 1317 pub debug_items: Vec<DebugItem>, 1318 1319 /// Contains picture cache tiles, and associated information. 1320 /// Used by the renderer to composite tiles into the framebuffer, 1321 /// or hand them off to an OS compositor. 1322 pub composite_state: CompositeState, 1323 1324 /// Main GPU data buffer constructed (primarily) during the prepare 1325 /// pass for primitives that were visible and dirty. 1326 pub gpu_buffer_f: GpuBufferF, 1327 pub gpu_buffer_i: GpuBufferI, 1328 1329 /// The backing store for the frame's allocator. 1330 /// 1331 /// # Safety 1332 /// 1333 /// Must not be dropped while frame allocations are alive. 1334 /// 1335 /// Rust has deterministic drop order [1]. We rely on `allocator_memory` 1336 /// being the last member of the `Frame` struct so that it is dropped 1337 /// after the frame's containers. 1338 /// 1339 /// [1]: https://doc.rust-lang.org/reference/destructors.html 1340 pub allocator_memory: FrameMemory, 1341 } 1342 1343 impl Frame { 1344 // This frame must be flushed if it writes to the 1345 // texture cache, and hasn't been drawn yet. 1346 pub fn must_be_drawn(&self) -> bool { 1347 self.has_texture_cache_tasks && !self.has_been_rendered 1348 } 1349 1350 // Returns true if this frame doesn't alter what is on screen currently. 1351 pub fn is_nop(&self) -> bool { 1352 // If there are no off-screen passes, that implies that there are no 1353 // picture cache tiles, and no texture cache tasks being updates. If this 1354 // is the case, we can consider the frame a nop (higher level checks 1355 // test if a composite is needed due to picture cache surfaces moving 1356 // or external surfaces being updated). 1357 self.passes.is_empty() 1358 } 1359 }