batch.rs (140630B)
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

use api::{AlphaType, ClipMode, ImageBufferKind};
use api::{FontInstanceFlags, YuvColorSpace, YuvFormat, ColorDepth, ColorRange, PremultipliedColorF};
use api::units::*;
use crate::clip::{ClipNodeFlags, ClipNodeRange, ClipItemKind, ClipStore};
use crate::command_buffer::PrimitiveCommand;
use crate::composite::CompositorSurfaceKind;
use crate::pattern::PatternKind;
use crate::spatial_tree::{SpatialTree, SpatialNodeIndex, CoordinateSystemId};
use glyph_rasterizer::{GlyphFormat, SubpixelDirection};
use crate::gpu_types::{BrushFlags, BrushInstance, ImageSource, PrimitiveHeaders, UvRectKind, ZBufferId, ZBufferIdGenerator};
use crate::gpu_types::SplitCompositeInstance;
use crate::gpu_types::{PrimitiveInstanceData, RasterizationSpace, GlyphInstance};
use crate::gpu_types::{PrimitiveHeader, PrimitiveHeaderIndex, TransformPaletteId, TransformPalette};
use crate::gpu_types::{ImageBrushUserData, get_shader_opacity, BoxShadowData, MaskInstance};
use crate::gpu_types::{ClipMaskInstanceCommon, ClipMaskInstanceRect, ClipMaskInstanceBoxShadow};
use crate::internal_types::{FastHashMap, Filter, FrameAllocator, FrameMemory, FrameVec, Swizzle, TextureSource};
use crate::picture::{Picture3DContext, PictureCompositeMode, calculate_screen_uv};
use crate::prim_store::{PrimitiveInstanceKind, ClipData};
use crate::prim_store::{PrimitiveInstance, PrimitiveOpacity, SegmentInstanceIndex};
use crate::prim_store::{BrushSegment, ClipMaskKind, ClipTaskIndex};
use crate::prim_store::VECS_PER_SEGMENT;
use crate::quad;
use crate::render_target::RenderTargetContext;
use crate::render_task_graph::{RenderTaskId, RenderTaskGraph};
use crate::render_task::{RenderTaskAddress, RenderTaskKind, SubPass};
use crate::renderer::{BlendMode, GpuBufferAddress, GpuBufferBlockF, GpuBufferBuilder, ShaderColorMode};
use crate::renderer::MAX_VERTEX_TEXTURE_WIDTH;
use crate::resource_cache::{GlyphFetchResult, ImageProperties};
use crate::space::SpaceMapper;
use crate::visibility::{PrimitiveVisibilityFlags, VisibilityState};
use smallvec::SmallVec;
use std::{f32, i32, usize};
use crate::util::{project_rect, MaxRect, TransformedRectKind, ScaleOffset};
use crate::segment::EdgeAaSegmentMask;


// Special sentinel value recognized by the shader. It is considered to be
// a dummy task that doesn't mask out anything.
const OPAQUE_TASK_ADDRESS: RenderTaskAddress = RenderTaskAddress(0x7fffffff);

/// Used to signal there are no segments provided with this primitive.
pub const INVALID_SEGMENT_INDEX: i32 = 0xffff;

/// Size in device pixels for tiles that clip masks are drawn in.
const CLIP_RECTANGLE_TILE_SIZE: i32 = 128;

/// The minimum size of a clip mask before trying to draw in tiles.
52 const CLIP_RECTANGLE_AREA_THRESHOLD: f32 = (CLIP_RECTANGLE_TILE_SIZE * CLIP_RECTANGLE_TILE_SIZE * 4) as f32; 53 54 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] 55 #[cfg_attr(feature = "capture", derive(Serialize))] 56 #[cfg_attr(feature = "replay", derive(Deserialize))] 57 pub enum BrushBatchKind { 58 Solid, 59 Image(ImageBufferKind), 60 Blend, 61 MixBlend { 62 task_id: RenderTaskId, 63 backdrop_id: RenderTaskId, 64 }, 65 YuvImage(ImageBufferKind, YuvFormat, ColorDepth, YuvColorSpace, ColorRange), 66 LinearGradient, 67 Opacity, 68 } 69 70 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] 71 #[cfg_attr(feature = "capture", derive(Serialize))] 72 #[cfg_attr(feature = "replay", derive(Deserialize))] 73 pub enum BatchKind { 74 SplitComposite, 75 TextRun(GlyphFormat), 76 Brush(BrushBatchKind), 77 Quad(PatternKind), 78 } 79 80 /// Input textures for a primitive, without consideration of clip mask 81 #[derive(Copy, Clone, Debug)] 82 #[cfg_attr(feature = "capture", derive(Serialize))] 83 #[cfg_attr(feature = "replay", derive(Deserialize))] 84 pub struct TextureSet { 85 pub colors: [TextureSource; 3], 86 } 87 88 impl TextureSet { 89 const UNTEXTURED: TextureSet = TextureSet { 90 colors: [ 91 TextureSource::Invalid, 92 TextureSource::Invalid, 93 TextureSource::Invalid, 94 ], 95 }; 96 97 /// A textured primitive 98 fn prim_textured( 99 color: TextureSource, 100 ) -> Self { 101 TextureSet { 102 colors: [ 103 color, 104 TextureSource::Invalid, 105 TextureSource::Invalid, 106 ], 107 } 108 } 109 110 fn is_compatible_with(&self, other: &TextureSet) -> bool { 111 self.colors[0].is_compatible(&other.colors[0]) && 112 self.colors[1].is_compatible(&other.colors[1]) && 113 self.colors[2].is_compatible(&other.colors[2]) 114 } 115 } 116 117 impl TextureSource { 118 fn combine(&self, other: TextureSource) -> TextureSource { 119 if other == TextureSource::Invalid { 120 *self 121 } else { 122 other 123 } 124 } 125 } 126 127 /// Optional textures that can be used as a source in the 
/// Textures that are not used by the batch are equal to TextureId::invalid().
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BatchTextures {
    /// Input textures for the primitive itself.
    pub input: TextureSet,
    /// Optional clip mask texture (Invalid when no mask is needed).
    pub clip_mask: TextureSource,
}

impl BatchTextures {
    /// An empty batch textures (no binding slots set)
    pub fn empty() -> BatchTextures {
        BatchTextures {
            input: TextureSet::UNTEXTURED,
            clip_mask: TextureSource::Invalid,
        }
    }

    /// A textured primitive with optional clip mask
    pub fn prim_textured(
        color: TextureSource,
        clip_mask: TextureSource,
    ) -> BatchTextures {
        BatchTextures {
            input: TextureSet::prim_textured(color),
            clip_mask,
        }
    }

    /// An untextured primitive with optional clip mask
    pub fn prim_untextured(
        clip_mask: TextureSource,
    ) -> BatchTextures {
        BatchTextures {
            input: TextureSet::UNTEXTURED,
            clip_mask,
        }
    }

    /// A composite style effect with single input texture
    pub fn composite_rgb(
        texture: TextureSource,
    ) -> BatchTextures {
        BatchTextures {
            input: TextureSet {
                colors: [
                    texture,
                    TextureSource::Invalid,
                    TextureSource::Invalid,
                ],
            },
            clip_mask: TextureSource::Invalid,
        }
    }

    /// A composite style effect with up to 3 input textures
    pub fn composite_yuv(
        color0: TextureSource,
        color1: TextureSource,
        color2: TextureSource,
    ) -> BatchTextures {
        BatchTextures {
            input: TextureSet {
                colors: [color0, color1, color2],
            },
            clip_mask: TextureSource::Invalid,
        }
    }

    /// True when the clip mask and every input slot are pairwise compatible.
    pub fn is_compatible_with(&self, other: &BatchTextures) -> bool {
        if !self.clip_mask.is_compatible(&other.clip_mask) {
            return false;
        }

        self.input.is_compatible_with(&other.input)
    }

    /// Combine two compatible texture sets slot by slot, or return None
    /// if they cannot be used by the same batch.
    pub fn combine_textures(&self, other: BatchTextures) -> Option<BatchTextures> {
        if !self.is_compatible_with(&other) {
            return None;
        }

        let mut new_textures = BatchTextures::empty();

        new_textures.clip_mask = self.clip_mask.combine(other.clip_mask);

        for i in 0 .. 3 {
            new_textures.input.colors[i] = self.input.colors[i].combine(other.input.colors[i]);
        }

        Some(new_textures)
    }

    /// In-place variant of `combine_textures`; callers are expected to have
    /// checked compatibility already.
    fn merge(&mut self, other: &BatchTextures) {
        self.clip_mask = self.clip_mask.combine(other.clip_mask);

        for (s, o) in self.input.colors.iter_mut().zip(other.input.colors.iter()) {
            *s = s.combine(*o);
        }
    }
}

/// Everything that determines whether two instances can be drawn by the
/// same draw call: shader kind, blend mode and bound textures.
#[derive(Copy, Clone, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BatchKey {
    pub kind: BatchKind,
    pub blend_mode: BlendMode,
    pub textures: BatchTextures,
}

impl BatchKey {
    pub fn new(kind: BatchKind, blend_mode: BlendMode, textures: BatchTextures) -> Self {
        BatchKey {
            kind,
            blend_mode,
            textures,
        }
    }

    pub fn is_compatible_with(&self, other: &BatchKey) -> bool {
        self.kind == other.kind && self.blend_mode == other.blend_mode && self.textures.is_compatible_with(&other.textures)
    }
}

/// Tracks the screen coverage of a batch so overlap queries can be answered
/// cheaply when deciding whether an instance may be merged into it.
pub struct BatchRects {
    /// Union of all of the batch's item rects.
    ///
    /// Very often we can skip iterating over item rects by testing against
    /// this one first.
    batch: PictureRect,
    /// When the batch rectangle above isn't a good enough approximation, we
    /// store per item rects.
    items: Option<FrameVec<PictureRect>>,
    // TODO: batch rects don't need to be part of the frame but they currently
    // are. It may be cleaner to remove them from the frame's final data structure
    // and not use the frame's allocator.
    allocator: FrameAllocator,
}

impl BatchRects {
    fn new(allocator: FrameAllocator) -> Self {
        BatchRects {
            batch: PictureRect::zero(),
            items: None,
            allocator,
        }
    }

    #[inline]
    fn add_rect(&mut self, rect: &PictureRect) {
        let union = self.batch.union(rect);
        // If we have already started storing per-item rects, continue doing so.
        // Otherwise, check whether only storing the batch rect is a good enough
        // approximation. The union being larger than the sum of the two areas
        // means the rects are far apart, so the union would over-approximate.
        if let Some(items) = &mut self.items {
            items.push(*rect);
        } else if self.batch.area() + rect.area() < union.area() {
            let mut items = self.allocator.clone().new_vec_with_capacity(16);
            items.push(self.batch);
            items.push(*rect);
            self.items = Some(items);
        }

        self.batch = union;
    }

    #[inline]
    fn intersects(&mut self, rect: &PictureRect) -> bool {
        if !self.batch.intersects(rect) {
            return false;
        }

        if let Some(items) = &self.items {
            items.iter().any(|item| item.intersects(rect))
        } else {
            // If we don't have per-item rects it means the batch rect is a good
            // enough approximation and we didn't bother storing per-rect items.
            true
        }
    }
}


/// Batches for z-ordered (blended) content. New instances may only be merged
/// into an earlier batch when they don't overlap anything batched after it.
pub struct AlphaBatchList {
    pub batches: FrameVec<PrimitiveBatch>,
    pub batch_rects: FrameVec<BatchRects>,
    current_batch_index: usize,
    current_z_id: ZBufferId,
    break_advanced_blend_batches: bool,
}

impl AlphaBatchList {
    fn new(break_advanced_blend_batches: bool, preallocate: usize, memory: &FrameMemory) -> Self {
        AlphaBatchList {
            batches: memory.new_vec_with_capacity(preallocate),
            batch_rects: memory.new_vec_with_capacity(preallocate),
            current_z_id: ZBufferId::invalid(),
            current_batch_index: usize::MAX,
            break_advanced_blend_batches,
        }
    }

    /// Clear all current batches in this list. This is typically used
    /// when a primitive is encountered that occludes all previous
    /// content in this batch list.
    fn clear(&mut self) {
        self.current_batch_index = usize::MAX;
        self.current_z_id = ZBufferId::invalid();
        self.batches.clear();
        self.batch_rects.clear();
    }

    pub fn set_params_and_get_batch(
        &mut self,
        key: BatchKey,
        features: BatchFeatures,
        // The bounding box of everything at this Z plane. We expect potentially
        // multiple primitive segments coming with the same `z_id`.
        z_bounding_rect: &PictureRect,
        z_id: ZBufferId,
    ) -> &mut FrameVec<PrimitiveInstanceData> {
        if z_id != self.current_z_id ||
           self.current_batch_index == usize::MAX ||
           !self.batches[self.current_batch_index].key.is_compatible_with(&key)
        {
            let mut selected_batch_index = None;

            match key.blend_mode {
                BlendMode::Advanced(_) if self.break_advanced_blend_batches => {
                    // don't try to find a batch
                }
                _ => {
                    for (batch_index, batch) in self.batches.iter().enumerate().rev() {
                        // For normal batches, we only need to check for overlaps for batches
                        // other than the first batch we consider. If the first batch
                        // is compatible, then we know there isn't any potential overlap
                        // issues to worry about.
                        if batch.key.is_compatible_with(&key) {
                            selected_batch_index = Some(batch_index);
                            break;
                        }

                        // check for intersections; an overlap with a later batch
                        // means we must not merge into anything drawn before it.
                        if self.batch_rects[batch_index].intersects(z_bounding_rect) {
                            break;
                        }
                    }
                }
            }

            if selected_batch_index.is_none() {
                // Text runs tend to have a lot of instances per batch, causing a lot of reallocation
                // churn as items are added one by one, so we give it a head start. Ideally we'd start
                // with a larger number, closer to 1k but in some bad cases with lots of batch break
                // we would be wasting a lot of memory.
                // Generally it is safe to preallocate small-ish values for other batch kinds because
                // the items are small and there are no zero-sized batches so there will always be
                // at least one allocation.
                let prealloc = match key.kind {
                    BatchKind::TextRun(..) => 128,
                    _ => 16,
                };
                let mut new_batch = PrimitiveBatch::new(key, self.batches.allocator().clone());
                new_batch.instances.reserve(prealloc);
                selected_batch_index = Some(self.batches.len());
                self.batches.push(new_batch);
                self.batch_rects.push(BatchRects::new(self.batches.allocator().clone()));
            }

            self.current_batch_index = selected_batch_index.unwrap();
            self.batch_rects[self.current_batch_index].add_rect(z_bounding_rect);
            self.current_z_id = z_id;
        }

        let batch = &mut self.batches[self.current_batch_index];
        batch.features |= features;
        batch.key.textures.merge(&key.textures);

        &mut batch.instances
    }
}

/// Batches for opaque content, drawn with the z-buffer so instances can be
/// merged into any compatible batch regardless of overlap.
pub struct OpaqueBatchList {
    pub pixel_area_threshold_for_new_batch: f32,
    pub batches: FrameVec<PrimitiveBatch>,
    pub current_batch_index: usize,
    lookback_count: usize,
}

impl OpaqueBatchList {
    fn new(pixel_area_threshold_for_new_batch: f32, lookback_count: usize, memory: &FrameMemory) -> Self {
        OpaqueBatchList {
            batches: memory.new_vec(),
            pixel_area_threshold_for_new_batch,
            current_batch_index: usize::MAX,
            lookback_count,
        }
    }

    /// Clear all current batches in this list. This is typically used
    /// when a primitive is encountered that occludes all previous
    /// content in this batch list.
    fn clear(&mut self) {
        self.current_batch_index = usize::MAX;
        self.batches.clear();
    }

    pub fn set_params_and_get_batch(
        &mut self,
        key: BatchKey,
        features: BatchFeatures,
        // The bounding box of everything at the current Z, whatever it is. We expect potentially
        // multiple primitive segments produced by a primitive, which allows us to check
        // `current_batch_index` instead of iterating the batches.
        z_bounding_rect: &PictureRect,
    ) -> &mut FrameVec<PrimitiveInstanceData> {
        // If the area of this primitive is larger than the given threshold,
        // then it is large enough to warrant breaking a batch for. In this
        // case we just see if it can be added to the existing batch or
        // create a new one.
        let is_large_occluder = z_bounding_rect.area() > self.pixel_area_threshold_for_new_batch;
        // Since primitives of the same kind tend to come in succession, we keep track
        // of the current batch index to skip the search in some cases. We ignore the
        // current batch index in the case of large occluders to make sure they get added
        // at the top of the batch list.
        if is_large_occluder || self.current_batch_index == usize::MAX ||
           !self.batches[self.current_batch_index].key.is_compatible_with(&key) {
            let mut selected_batch_index = None;
            if is_large_occluder {
                if let Some(batch) = self.batches.last() {
                    if batch.key.is_compatible_with(&key) {
                        selected_batch_index = Some(self.batches.len() - 1);
                    }
                }
            } else {
                // Otherwise, look back through a reasonable number of batches.
                for (batch_index, batch) in self.batches.iter().enumerate().rev().take(self.lookback_count) {
                    if batch.key.is_compatible_with(&key) {
                        selected_batch_index = Some(batch_index);
                        break;
                    }
                }
            }

            if selected_batch_index.is_none() {
                let new_batch = PrimitiveBatch::new(key, self.batches.allocator().clone());
                selected_batch_index = Some(self.batches.len());
                self.batches.push(new_batch);
            }

            self.current_batch_index = selected_batch_index.unwrap();
        }

        let batch = &mut self.batches[self.current_batch_index];
        batch.features |= features;
        batch.key.textures.merge(&key.textures);

        &mut batch.instances
    }

    fn finalize(&mut self) {
        // Reverse the instance arrays in the opaque batches
        // to get maximum z-buffer efficiency by drawing
        // front-to-back.
        // TODO(gw): Maybe we can change the batch code to
        //           build these in reverse and avoid having
        //           to reverse the instance array here.
        for batch in &mut self.batches {
            batch.instances.reverse();
        }
    }
}

/// A set of instances that can be drawn by a single draw call.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PrimitiveBatch {
    pub key: BatchKey,
    pub instances: FrameVec<PrimitiveInstanceData>,
    pub features: BatchFeatures,
}

bitflags! {
    /// Features of the batch that, if not requested, may allow a fast-path.
    ///
    /// Rather than breaking batches when primitives request different features,
    /// we always request the minimum amount of features to satisfy all items in
    /// the batch.
    /// The goal is to let the renderer optionally select more specialized
    /// versions of a shader if the batch doesn't require certain code paths.
    /// Not all shaders necessarily implement all of these features.
    #[cfg_attr(feature = "capture", derive(Serialize))]
    #[cfg_attr(feature = "replay", derive(Deserialize))]
    #[derive(Debug, Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
    pub struct BatchFeatures: u8 {
        const ALPHA_PASS = 1 << 0;
        const ANTIALIASING = 1 << 1;
        const REPETITION = 1 << 2;
        /// Indicates a primitive in this batch may use a clip mask.
        const CLIP_MASK = 1 << 3;
    }
}

impl PrimitiveBatch {
    fn new(key: BatchKey, allocator: FrameAllocator) -> PrimitiveBatch {
        PrimitiveBatch {
            key,
            instances: FrameVec::new_in(allocator),
            features: BatchFeatures::empty(),
        }
    }

    /// Append another compatible batch's instances, taking the union of
    /// features and textures.
    fn merge(&mut self, other: PrimitiveBatch) {
        self.instances.extend(other.instances);
        self.features |= other.features;
        self.key.textures.merge(&other.key.textures);
    }
}

/// The opaque and alpha batches produced for one render task.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct AlphaBatchContainer {
    pub opaque_batches: FrameVec<PrimitiveBatch>,
    pub alpha_batches: FrameVec<PrimitiveBatch>,
    /// The overall scissor rect for this render task, if one
    /// is required.
    pub task_scissor_rect: Option<DeviceIntRect>,
    /// The rectangle of the owning render target that this
    /// set of batches affects.
    pub task_rect: DeviceIntRect,
}

impl AlphaBatchContainer {
    pub fn new(
        task_scissor_rect: Option<DeviceIntRect>,
        memory: &FrameMemory,
    ) -> AlphaBatchContainer {
        AlphaBatchContainer {
            opaque_batches: memory.new_vec(),
            alpha_batches: memory.new_vec(),
            task_scissor_rect,
            task_rect: DeviceIntRect::zero(),
        }
    }

    pub fn is_empty(&self) -> bool {
        self.opaque_batches.is_empty() &&
        self.alpha_batches.is_empty()
    }

    /// Fold a finished builder's batches into this container, merging each
    /// incoming batch into a compatible existing one where possible.
    fn merge(&mut self, builder: AlphaBatchBuilder, task_rect: &DeviceIntRect) {
        self.task_rect = self.task_rect.union(task_rect);

        // Opaque batches are unordered, so any compatible batch will do.
        for other_batch in builder.opaque_batch_list.batches {
            let batch_index = self.opaque_batches.iter().position(|batch| {
                batch.key.is_compatible_with(&other_batch.key)
            });

            match batch_index {
                Some(batch_index) => {
                    self.opaque_batches[batch_index].merge(other_batch);
                }
                None => {
                    self.opaque_batches.push(other_batch);
                }
            }
        }

        // Alpha batches must preserve relative draw order, so only search
        // from the last batch we merged into onwards.
        let mut min_batch_index = 0;

        for other_batch in builder.alpha_batch_list.batches {
            let batch_index = self.alpha_batches.iter().skip(min_batch_index).position(|batch| {
                batch.key.is_compatible_with(&other_batch.key)
            });

            match batch_index {
                Some(batch_index) => {
                    let index = batch_index + min_batch_index;
                    self.alpha_batches[index].merge(other_batch);
                    min_batch_index = index;
                }
                None => {
                    self.alpha_batches.push(other_batch);
                    min_batch_index = self.alpha_batches.len();
                }
            }
        }
    }
}

/// Each segment can optionally specify a per-segment
/// texture set and one user data field.
#[derive(Debug, Copy, Clone)]
struct SegmentInstanceData {
    textures: TextureSet,
    specific_resource_address: i32,
}

/// Encapsulates the logic of building batches for items that are blended.
627 pub struct AlphaBatchBuilder { 628 pub alpha_batch_list: AlphaBatchList, 629 pub opaque_batch_list: OpaqueBatchList, 630 pub render_task_id: RenderTaskId, 631 render_task_address: RenderTaskAddress, 632 } 633 634 impl AlphaBatchBuilder { 635 pub fn new( 636 screen_size: DeviceIntSize, 637 break_advanced_blend_batches: bool, 638 lookback_count: usize, 639 render_task_id: RenderTaskId, 640 render_task_address: RenderTaskAddress, 641 memory: &FrameMemory, 642 ) -> Self { 643 // The threshold for creating a new batch is 644 // one quarter the screen size. 645 let batch_area_threshold = (screen_size.width * screen_size.height) as f32 / 4.0; 646 647 AlphaBatchBuilder { 648 alpha_batch_list: AlphaBatchList::new(break_advanced_blend_batches, 128, memory), 649 opaque_batch_list: OpaqueBatchList::new(batch_area_threshold, lookback_count, memory), 650 render_task_id, 651 render_task_address, 652 } 653 } 654 655 /// Clear all current batches in this builder. This is typically used 656 /// when a primitive is encountered that occludes all previous 657 /// content in this batch list. 
658 fn clear(&mut self) { 659 self.alpha_batch_list.clear(); 660 self.opaque_batch_list.clear(); 661 } 662 663 pub fn build( 664 mut self, 665 batch_containers: &mut FrameVec<AlphaBatchContainer>, 666 merged_batches: &mut AlphaBatchContainer, 667 task_rect: DeviceIntRect, 668 task_scissor_rect: Option<DeviceIntRect>, 669 ) { 670 self.opaque_batch_list.finalize(); 671 672 if task_scissor_rect.is_none() { 673 merged_batches.merge(self, &task_rect); 674 } else { 675 batch_containers.push(AlphaBatchContainer { 676 alpha_batches: self.alpha_batch_list.batches, 677 opaque_batches: self.opaque_batch_list.batches, 678 task_scissor_rect, 679 task_rect, 680 }); 681 } 682 } 683 684 pub fn push_single_instance( 685 &mut self, 686 key: BatchKey, 687 features: BatchFeatures, 688 bounding_rect: &PictureRect, 689 z_id: ZBufferId, 690 instance: PrimitiveInstanceData, 691 ) { 692 self.set_params_and_get_batch(key, features, bounding_rect, z_id) 693 .push(instance); 694 } 695 696 pub fn set_params_and_get_batch( 697 &mut self, 698 key: BatchKey, 699 features: BatchFeatures, 700 bounding_rect: &PictureRect, 701 z_id: ZBufferId, 702 ) -> &mut FrameVec<PrimitiveInstanceData> { 703 match key.blend_mode { 704 BlendMode::None => { 705 self.opaque_batch_list 706 .set_params_and_get_batch(key, features, bounding_rect) 707 } 708 BlendMode::Alpha | 709 BlendMode::PremultipliedAlpha | 710 BlendMode::PremultipliedDestOut | 711 BlendMode::SubpixelDualSource | 712 BlendMode::Advanced(_) | 713 BlendMode::MultiplyDualSource | 714 BlendMode::Screen | 715 BlendMode::Exclusion | 716 BlendMode::PlusLighter => { 717 self.alpha_batch_list 718 .set_params_and_get_batch(key, features, bounding_rect, z_id) 719 } 720 } 721 } 722 } 723 724 /// Supports (recursively) adding a list of primitives and pictures to an alpha batch 725 /// builder. In future, it will support multiple dirty regions / slices, allowing the 726 /// contents of a picture to be spliced into multiple batch builders. 
pub struct BatchBuilder {
    /// A temporary buffer that is used during glyph fetching, stored here
    /// to reduce memory allocations.
    glyph_fetch_buffer: Vec<GlyphFetchResult>,

    // The underlying batch builder that instances are pushed into.
    batcher: AlphaBatchBuilder,
}

impl BatchBuilder {
    pub fn new(batcher: AlphaBatchBuilder) -> Self {
        BatchBuilder {
            glyph_fetch_buffer: Vec::new(),
            batcher,
        }
    }

    /// Consume this wrapper and hand back the underlying batch builder.
    pub fn finalize(self) -> AlphaBatchBuilder {
        self.batcher
    }

    /// Build a `BrushInstance` from the given parameters and push it into
    /// the batch matching `batch_key`.
    fn add_brush_instance_to_batches(
        &mut self,
        batch_key: BatchKey,
        features: BatchFeatures,
        bounding_rect: &PictureRect,
        z_id: ZBufferId,
        segment_index: i32,
        edge_flags: EdgeAaSegmentMask,
        clip_task_address: RenderTaskAddress,
        brush_flags: BrushFlags,
        prim_header_index: PrimitiveHeaderIndex,
        resource_address: i32,
    ) {
        // These two options are mutually exclusive at the shader level.
        assert!(
            !(brush_flags.contains(BrushFlags::NORMALIZED_UVS)
                && features.contains(BatchFeatures::REPETITION)),
            "Normalized UVs are not supported with repetition."
        );
        let instance = BrushInstance {
            segment_index,
            edge_flags,
            clip_task_address,
            brush_flags,
            prim_header_index,
            resource_address,
        };

        self.batcher.push_single_instance(
            batch_key,
            features,
            bounding_rect,
            z_id,
            PrimitiveInstanceData::from(instance),
        );
    }

    /// Push a `SplitCompositeInstance` (plane-split 3D context item) into
    /// the batch matching `batch_key`.
    fn add_split_composite_instance_to_batches(
        &mut self,
        batch_key: BatchKey,
        features: BatchFeatures,
        bounding_rect: &PictureRect,
        z_id: ZBufferId,
        prim_header_index: PrimitiveHeaderIndex,
        polygons_address: i32,
    ) {
        let render_task_address = self.batcher.render_task_address;

        self.batcher.push_single_instance(
            batch_key,
            features,
            bounding_rect,
            z_id,
            PrimitiveInstanceData::from(SplitCompositeInstance {
                prim_header_index,
                render_task_address,
                polygons_address,
                z: z_id,
            }),
        );
    }

    /// Clear all current batchers.
This is typically used when a primitive 809 /// is encountered that occludes all previous content in this batch list. 810 fn clear_batches(&mut self) { 811 self.batcher.clear(); 812 } 813 814 // Adds a primitive to a batch. 815 // It can recursively call itself in some situations, for 816 // example if it encounters a picture where the items 817 // in that picture are being drawn into the same target. 818 pub fn add_prim_to_batch( 819 &mut self, 820 cmd: &PrimitiveCommand, 821 prim_spatial_node_index: SpatialNodeIndex, 822 ctx: &RenderTargetContext, 823 render_tasks: &RenderTaskGraph, 824 prim_headers: &mut PrimitiveHeaders, 825 transforms: &mut TransformPalette, 826 root_spatial_node_index: SpatialNodeIndex, 827 surface_spatial_node_index: SpatialNodeIndex, 828 z_generator: &mut ZBufferIdGenerator, 829 prim_instances: &[PrimitiveInstance], 830 gpu_buffer_builder: &mut GpuBufferBuilder, 831 segments: &[RenderTaskId], 832 ) { 833 let (prim_instance_index, extra_prim_gpu_address) = match cmd { 834 PrimitiveCommand::Simple { prim_instance_index } => { 835 (prim_instance_index, None) 836 } 837 PrimitiveCommand::Complex { prim_instance_index, gpu_address } => { 838 (prim_instance_index, Some(gpu_address.as_int())) 839 } 840 PrimitiveCommand::Instance { prim_instance_index, gpu_buffer_address } => { 841 (prim_instance_index, Some(gpu_buffer_address.as_int())) 842 } 843 PrimitiveCommand::Quad { pattern, pattern_input, prim_instance_index, gpu_buffer_address, quad_flags, edge_flags, transform_id, src_color_task_id } => { 844 let prim_instance = &prim_instances[prim_instance_index.0 as usize]; 845 let prim_info = &prim_instance.vis; 846 let bounding_rect = &prim_info.clip_chain.pic_coverage_rect; 847 let render_task_address = self.batcher.render_task_address; 848 849 if segments.is_empty() { 850 let z_id = z_generator.next(); 851 852 quad::add_to_batch( 853 *pattern, 854 *pattern_input, 855 render_task_address, 856 *transform_id, 857 *gpu_buffer_address, 858 *quad_flags, 
859 *edge_flags, 860 INVALID_SEGMENT_INDEX as u8, 861 *src_color_task_id, 862 z_id, 863 render_tasks, 864 gpu_buffer_builder, 865 |key, instance| { 866 let batch = self.batcher.set_params_and_get_batch( 867 key, 868 BatchFeatures::empty(), 869 bounding_rect, 870 z_id, 871 ); 872 batch.push(instance); 873 }, 874 ); 875 } else { 876 for (i, task_id) in segments.iter().enumerate() { 877 // TODO(gw): edge_flags should be per-segment, when used for more than composites 878 debug_assert!(edge_flags.is_empty()); 879 880 let z_id = z_generator.next(); 881 882 quad::add_to_batch( 883 *pattern, 884 *pattern_input, 885 render_task_address, 886 *transform_id, 887 *gpu_buffer_address, 888 *quad_flags, 889 *edge_flags, 890 i as u8, 891 *task_id, 892 z_id, 893 render_tasks, 894 gpu_buffer_builder, 895 |key, instance| { 896 let batch = self.batcher.set_params_and_get_batch( 897 key, 898 BatchFeatures::empty(), 899 bounding_rect, 900 z_id, 901 ); 902 batch.push(instance); 903 }, 904 ); 905 } 906 } 907 908 return; 909 } 910 }; 911 912 let prim_instance = &prim_instances[prim_instance_index.0 as usize]; 913 let is_anti_aliased = ctx.data_stores.prim_has_anti_aliasing(prim_instance); 914 915 let brush_flags = if is_anti_aliased { 916 BrushFlags::FORCE_AA 917 } else { 918 BrushFlags::empty() 919 }; 920 921 let vis_flags = match prim_instance.vis.state { 922 VisibilityState::Culled => { 923 return; 924 } 925 VisibilityState::PassThrough | 926 VisibilityState::Unset => { 927 panic!("bug: invalid visibility state"); 928 } 929 VisibilityState::Visible { vis_flags, .. } => { 930 vis_flags 931 } 932 }; 933 934 // If this primitive is a backdrop, that means that it is known to cover 935 // the entire picture cache background. In that case, the renderer will 936 // use the backdrop color as a clear color, and so we can drop this 937 // primitive and any prior primitives from the batch lists for this 938 // picture cache slice. 
939 if vis_flags.contains(PrimitiveVisibilityFlags::IS_BACKDROP) { 940 self.clear_batches(); 941 return; 942 } 943 944 let transform_id = transforms 945 .get_id( 946 prim_spatial_node_index, 947 root_spatial_node_index, 948 ctx.spatial_tree, 949 ); 950 951 // TODO(gw): Calculating this for every primitive is a bit 952 // wasteful. We should probably cache this in 953 // the scroll node... 954 let transform_kind = transform_id.transform_kind(); 955 let prim_info = &prim_instance.vis; 956 let bounding_rect = &prim_info.clip_chain.pic_coverage_rect; 957 958 let mut z_id = z_generator.next(); 959 960 let prim_rect = ctx.data_stores.get_local_prim_rect( 961 prim_instance, 962 &ctx.prim_store.pictures, 963 ctx.surfaces, 964 ); 965 966 let mut batch_features = BatchFeatures::empty(); 967 if ctx.data_stores.prim_may_need_repetition(prim_instance) { 968 batch_features |= BatchFeatures::REPETITION; 969 } 970 971 if transform_kind != TransformedRectKind::AxisAligned || is_anti_aliased { 972 batch_features |= BatchFeatures::ANTIALIASING; 973 } 974 975 // Check if the primitive might require a clip mask. 976 if prim_info.clip_task_index != ClipTaskIndex::INVALID { 977 batch_features |= BatchFeatures::CLIP_MASK; 978 } 979 980 if !bounding_rect.is_empty() { 981 debug_assert_eq!(prim_info.clip_chain.pic_spatial_node_index, surface_spatial_node_index, 982 "The primitive's bounding box is specified in a different coordinate system from the current batch!"); 983 } 984 985 if let PrimitiveInstanceKind::Picture { pic_index, .. 
} = prim_instance.kind { 986 let picture = &ctx.prim_store.pictures[pic_index.0]; 987 if let Some(snapshot) = picture.snapshot { 988 if snapshot.detached { 989 return; 990 } 991 } 992 993 let blend_mode = BlendMode::PremultipliedAlpha; 994 let prim_cache_address = ctx.globals.default_image_data; 995 996 match picture.raster_config { 997 Some(ref raster_config) => { 998 // If the child picture was rendered in local space, we can safely 999 // interpolate the UV coordinates with perspective correction. 1000 let brush_flags = brush_flags | BrushFlags::PERSPECTIVE_INTERPOLATION; 1001 1002 let surface = &ctx.surfaces[raster_config.surface_index.0]; 1003 let mut local_clip_rect = prim_info.clip_chain.local_clip_rect; 1004 1005 // If we are drawing with snapping enabled, form a simple transform that just applies 1006 // the scale / translation from the raster transform. Otherwise, in edge cases where the 1007 // intermediate surface has a non-identity but axis-aligned transform (e.g. a 180 degree 1008 // rotation) it can be applied twice. 
1009 let transform_id = if surface.surface_spatial_node_index == surface.raster_spatial_node_index { 1010 transform_id 1011 } else { 1012 let map_local_to_raster = SpaceMapper::new_with_target( 1013 root_spatial_node_index, 1014 surface.surface_spatial_node_index, 1015 LayoutRect::max_rect(), 1016 ctx.spatial_tree, 1017 ); 1018 1019 let raster_rect = map_local_to_raster 1020 .map(&prim_rect) 1021 .unwrap(); 1022 1023 let sx = (raster_rect.max.x - raster_rect.min.x) / (prim_rect.max.x - prim_rect.min.x); 1024 let sy = (raster_rect.max.y - raster_rect.min.y) / (prim_rect.max.y - prim_rect.min.y); 1025 1026 let tx = raster_rect.min.x - sx * prim_rect.min.x; 1027 let ty = raster_rect.min.y - sy * prim_rect.min.y; 1028 1029 let transform = ScaleOffset::new(sx, sy, tx, ty); 1030 1031 let raster_clip_rect = map_local_to_raster 1032 .map(&prim_info.clip_chain.local_clip_rect) 1033 .unwrap(); 1034 local_clip_rect = transform.unmap_rect(&raster_clip_rect); 1035 1036 transforms.get_custom(transform.to_transform()) 1037 }; 1038 1039 let picture_prim_header = PrimitiveHeader { 1040 local_rect: prim_rect, 1041 local_clip_rect, 1042 specific_prim_address: prim_cache_address.as_int(), 1043 transform_id, 1044 z: z_id, 1045 render_task_address: self.batcher.render_task_address, 1046 user_data: [0; 4], // Will be overridden by most uses 1047 }; 1048 1049 let mut is_opaque = prim_info.clip_task_index == ClipTaskIndex::INVALID 1050 && surface.is_opaque 1051 && transform_kind == TransformedRectKind::AxisAligned 1052 && !is_anti_aliased; 1053 1054 let pic_task_id = picture.primary_render_task_id.unwrap(); 1055 1056 let pic_task = &render_tasks[pic_task_id]; 1057 match pic_task.sub_pass { 1058 Some(SubPass::Masks { .. }) => { 1059 is_opaque = false; 1060 } 1061 None => {} 1062 } 1063 match raster_config.composite_mode { 1064 PictureCompositeMode::TileCache { .. 
} => { 1065 // TODO(gw): For now, TileCache is still a composite mode, even though 1066 // it will only exist as a top level primitive and never 1067 // be encountered during batching. Consider making TileCache 1068 // a standalone type, not a picture. 1069 return; 1070 } 1071 PictureCompositeMode::IntermediateSurface { .. } => { 1072 // TODO(gw): As an optimization, support making this a pass-through 1073 // and/or drawing directly from here when possible 1074 // (e.g. if not wrapped by filters / different spatial node). 1075 return; 1076 } 1077 _=>{} 1078 } 1079 1080 let (clip_task_address, clip_mask_texture_id) = ctx.get_prim_clip_task_and_texture( 1081 prim_info.clip_task_index, 1082 render_tasks, 1083 ).unwrap(); 1084 1085 let (uv_rect_address, texture) = render_tasks.resolve_location( 1086 pic_task_id, 1087 1088 ).unwrap(); 1089 1090 // The set of input textures that most composite modes use, 1091 // howevr some override it. 1092 let textures = BatchTextures::prim_textured( 1093 texture, 1094 clip_mask_texture_id, 1095 ); 1096 1097 let (key, prim_user_data, resource_address) = match raster_config.composite_mode { 1098 PictureCompositeMode::TileCache { .. } 1099 | PictureCompositeMode::IntermediateSurface { .. } 1100 => return, 1101 PictureCompositeMode::Filter(ref filter) => { 1102 assert!(filter.is_visible()); 1103 match filter { 1104 Filter::Blur { .. } => { 1105 let kind = BatchKind::Brush( 1106 BrushBatchKind::Image(ImageBufferKind::Texture2D) 1107 ); 1108 1109 let key = BatchKey::new( 1110 kind, 1111 blend_mode, 1112 textures, 1113 ); 1114 1115 let prim_user_data = ImageBrushUserData { 1116 color_mode: ShaderColorMode::Image, 1117 alpha_type: AlphaType::PremultipliedAlpha, 1118 raster_space: RasterizationSpace::Screen, 1119 opacity: 1.0, 1120 }.encode(); 1121 1122 (key, prim_user_data, uv_rect_address.as_int()) 1123 } 1124 Filter::DropShadows(shadows) => { 1125 // Draw an instance per shadow first, following by the content. 
1126 1127 // The shadows and the content get drawn as a brush image. 1128 let kind = BatchKind::Brush( 1129 BrushBatchKind::Image(ImageBufferKind::Texture2D), 1130 ); 1131 1132 // Gets the saved render task ID of the content, which is 1133 // deeper in the render task graph than the direct child. 1134 let secondary_id = picture.secondary_render_task_id.expect("no secondary!?"); 1135 let content_source = { 1136 let secondary_task = &render_tasks[secondary_id]; 1137 let texture_id = secondary_task.get_target_texture(); 1138 TextureSource::TextureCache( 1139 texture_id, 1140 Swizzle::default(), 1141 ) 1142 }; 1143 1144 // Retrieve the UV rect addresses for shadow/content. 1145 let shadow_uv_rect_address = uv_rect_address; 1146 let shadow_textures = textures; 1147 1148 let content_uv_rect_address = render_tasks[secondary_id] 1149 .get_texture_address() 1150 .as_int(); 1151 1152 // Build BatchTextures for shadow/content 1153 let content_textures = BatchTextures::prim_textured( 1154 content_source, 1155 clip_mask_texture_id, 1156 ); 1157 1158 // Build batch keys for shadow/content 1159 let shadow_key = BatchKey::new(kind, blend_mode, shadow_textures); 1160 let content_key = BatchKey::new(kind, blend_mode, content_textures); 1161 1162 for (shadow, shadow_prim_address) in shadows.iter().zip(picture.extra_gpu_data.iter()) { 1163 let shadow_rect = picture_prim_header.local_rect.translate(shadow.offset); 1164 1165 let shadow_prim_header = PrimitiveHeader { 1166 local_rect: shadow_rect, 1167 specific_prim_address: shadow_prim_address.as_int(), 1168 z: z_id, 1169 user_data: ImageBrushUserData { 1170 color_mode: ShaderColorMode::Alpha, 1171 alpha_type: AlphaType::PremultipliedAlpha, 1172 raster_space: RasterizationSpace::Screen, 1173 opacity: 1.0, 1174 }.encode(), 1175 ..picture_prim_header 1176 }; 1177 let shadow_prim_header_index = prim_headers.push(&shadow_prim_header); 1178 1179 self.add_brush_instance_to_batches( 1180 shadow_key, 1181 batch_features, 1182 bounding_rect, 
1183 z_id, 1184 INVALID_SEGMENT_INDEX, 1185 EdgeAaSegmentMask::all(), 1186 clip_task_address, 1187 brush_flags, 1188 shadow_prim_header_index, 1189 shadow_uv_rect_address.as_int(), 1190 ); 1191 } 1192 1193 // Update z_id for the content 1194 z_id = z_generator.next(); 1195 1196 let prim_user_data = ImageBrushUserData { 1197 color_mode: ShaderColorMode::Image, 1198 alpha_type: AlphaType::PremultipliedAlpha, 1199 raster_space: RasterizationSpace::Screen, 1200 opacity: 1.0, 1201 }.encode(); 1202 1203 (content_key, prim_user_data, content_uv_rect_address) 1204 } 1205 Filter::Opacity(_, amount) => { 1206 let amount = (amount * 65536.0) as i32; 1207 1208 let key = BatchKey::new( 1209 BatchKind::Brush(BrushBatchKind::Opacity), 1210 BlendMode::PremultipliedAlpha, 1211 textures, 1212 ); 1213 1214 let prim_user_data = [ 1215 uv_rect_address.as_int(), 1216 amount, 1217 0, 1218 0, 1219 ]; 1220 1221 (key, prim_user_data, 0) 1222 } 1223 _ => { 1224 // Must be kept in sync with brush_blend.glsl 1225 let filter_mode = filter.as_int(); 1226 1227 let user_data = match filter { 1228 Filter::Identity => 0x10000i32, // matches `Contrast(1)` 1229 Filter::Contrast(amount) | 1230 Filter::Grayscale(amount) | 1231 Filter::Invert(amount) | 1232 Filter::Saturate(amount) | 1233 Filter::Sepia(amount) | 1234 Filter::Brightness(amount) => { 1235 (amount * 65536.0) as i32 1236 } 1237 Filter::SrgbToLinear | Filter::LinearToSrgb => 0, 1238 Filter::HueRotate(angle) => { 1239 (0.01745329251 * angle * 65536.0) as i32 1240 } 1241 Filter::ColorMatrix(_) => { 1242 picture.extra_gpu_data[0].as_int() 1243 } 1244 Filter::Flood(_) => { 1245 picture.extra_gpu_data[0].as_int() 1246 } 1247 1248 // These filters are handled via different paths. 1249 Filter::ComponentTransfer | 1250 Filter::Blur { .. } | 1251 Filter::DropShadows(..) | 1252 Filter::Opacity(..) | 1253 Filter::SVGGraphNode(..) => unreachable!(), 1254 }; 1255 1256 // Other filters that may introduce opacity are handled via different 1257 // paths. 
1258 if let Filter::ColorMatrix(..) = filter { 1259 is_opaque = false; 1260 } 1261 1262 let blend_mode = if is_opaque { 1263 BlendMode::None 1264 } else { 1265 BlendMode::PremultipliedAlpha 1266 }; 1267 1268 let key = BatchKey::new( 1269 BatchKind::Brush(BrushBatchKind::Blend), 1270 blend_mode, 1271 textures, 1272 ); 1273 1274 let prim_user_data = [ 1275 uv_rect_address.as_int(), 1276 filter_mode, 1277 user_data, 1278 0, 1279 ]; 1280 1281 (key, prim_user_data, 0) 1282 } 1283 } 1284 } 1285 PictureCompositeMode::ComponentTransferFilter(handle) => { 1286 // This is basically the same as the general filter case above 1287 // except we store a little more data in the filter mode and 1288 // a gpu cache handle in the user data. 1289 let filter_data = &ctx.data_stores.filter_data[handle]; 1290 let filter_mode : i32 = Filter::ComponentTransfer.as_int() | 1291 ((filter_data.data.r_func.to_int() << 28 | 1292 filter_data.data.g_func.to_int() << 24 | 1293 filter_data.data.b_func.to_int() << 20 | 1294 filter_data.data.a_func.to_int() << 16) as i32); 1295 1296 let user_data = filter_data.gpu_buffer_address.as_int(); 1297 1298 let key = BatchKey::new( 1299 BatchKind::Brush(BrushBatchKind::Blend), 1300 BlendMode::PremultipliedAlpha, 1301 textures, 1302 ); 1303 1304 let prim_user_data = [ 1305 uv_rect_address.as_int(), 1306 filter_mode, 1307 user_data, 1308 0, 1309 ]; 1310 1311 (key, prim_user_data, 0) 1312 } 1313 PictureCompositeMode::MixBlend(mode) if BlendMode::from_mix_blend_mode( 1314 mode, 1315 ctx.use_advanced_blending, 1316 !ctx.break_advanced_blend_batches, 1317 ctx.use_dual_source_blending, 1318 ).is_some() => { 1319 let key = BatchKey::new( 1320 BatchKind::Brush( 1321 BrushBatchKind::Image(ImageBufferKind::Texture2D), 1322 ), 1323 BlendMode::from_mix_blend_mode( 1324 mode, 1325 ctx.use_advanced_blending, 1326 !ctx.break_advanced_blend_batches, 1327 ctx.use_dual_source_blending, 1328 ).unwrap(), 1329 textures, 1330 ); 1331 1332 let prim_user_data = ImageBrushUserData { 
1333 color_mode: match key.blend_mode { 1334 BlendMode::MultiplyDualSource => ShaderColorMode::MultiplyDualSource, 1335 _ => ShaderColorMode::Image, 1336 }, 1337 alpha_type: AlphaType::PremultipliedAlpha, 1338 raster_space: RasterizationSpace::Screen, 1339 opacity: 1.0, 1340 }.encode(); 1341 1342 (key, prim_user_data, uv_rect_address.as_int()) 1343 } 1344 PictureCompositeMode::MixBlend(mode) => { 1345 let backdrop_id = picture.secondary_render_task_id.expect("no backdrop!?"); 1346 1347 let color0 = render_tasks[backdrop_id].get_target_texture(); 1348 let color1 = render_tasks[pic_task_id].get_target_texture(); 1349 1350 // Create a separate brush instance for each batcher. For most cases, 1351 // there is only one batcher. However, in the case of drawing onto 1352 // a picture cache, there is one batcher per tile. Although not 1353 // currently used, the implementation of mix-blend-mode now supports 1354 // doing partial readbacks per-tile. In future, this will be enabled 1355 // and allow mix-blends to operate on picture cache surfaces without 1356 // a separate isolated intermediate surface. 
1357 1358 let batch_key = BatchKey::new( 1359 BatchKind::Brush( 1360 BrushBatchKind::MixBlend { 1361 task_id: self.batcher.render_task_id, 1362 backdrop_id, 1363 }, 1364 ), 1365 BlendMode::PremultipliedAlpha, 1366 BatchTextures { 1367 input: TextureSet { 1368 colors: [ 1369 TextureSource::TextureCache( 1370 color0, 1371 Swizzle::default(), 1372 ), 1373 TextureSource::TextureCache( 1374 color1, 1375 Swizzle::default(), 1376 ), 1377 TextureSource::Invalid, 1378 ], 1379 }, 1380 clip_mask: clip_mask_texture_id, 1381 }, 1382 ); 1383 let src_uv_address = render_tasks[pic_task_id].get_texture_address(); 1384 let readback_uv_address = render_tasks[backdrop_id].get_texture_address(); 1385 let prim_header = PrimitiveHeader { 1386 user_data: [ 1387 mode as u32 as i32, 1388 readback_uv_address.as_int(), 1389 src_uv_address.as_int(), 1390 0, 1391 ], 1392 ..picture_prim_header 1393 }; 1394 let prim_header_index = prim_headers.push(&prim_header); 1395 1396 let instance = BrushInstance { 1397 segment_index: INVALID_SEGMENT_INDEX, 1398 edge_flags: EdgeAaSegmentMask::all(), 1399 clip_task_address, 1400 brush_flags, 1401 prim_header_index, 1402 resource_address: 0, 1403 }; 1404 1405 self.batcher.push_single_instance( 1406 batch_key, 1407 batch_features, 1408 bounding_rect, 1409 z_id, 1410 PrimitiveInstanceData::from(instance), 1411 ); 1412 1413 return; 1414 } 1415 PictureCompositeMode::Blit(_) => { 1416 match picture.context_3d { 1417 Picture3DContext::In { root_data: Some(_), .. } => { 1418 unreachable!("bug: should not have a raster_config"); 1419 } 1420 Picture3DContext::In { root_data: None, .. } => { 1421 // TODO(gw): Store this inside the split picture so that we 1422 // don't need to pass in extra_prim_gpu_address for 1423 // every prim instance. 1424 // TODO(gw): Ideally we'd skip adding 3d child prims to batches 1425 // without gpu cache address but it's currently 1426 // used by the prepare pass. Refactor this! 
1427 let extra_prim_gpu_address = match extra_prim_gpu_address { 1428 Some(prim_address) => prim_address, 1429 None => return, 1430 }; 1431 1432 // Need a new z-id for each child preserve-3d context added 1433 // by this inner loop. 1434 let z_id = z_generator.next(); 1435 1436 let prim_header = PrimitiveHeader { 1437 z: z_id, 1438 transform_id: transforms 1439 .get_id( 1440 prim_spatial_node_index, 1441 root_spatial_node_index, 1442 ctx.spatial_tree, 1443 ), 1444 user_data: [ 1445 uv_rect_address.as_int(), 1446 BrushFlags::PERSPECTIVE_INTERPOLATION.bits() as i32, 1447 0, 1448 clip_task_address.0 as i32, 1449 ], 1450 ..picture_prim_header 1451 }; 1452 let prim_header_index = prim_headers.push(&prim_header); 1453 1454 let key = BatchKey::new( 1455 BatchKind::SplitComposite, 1456 BlendMode::PremultipliedAlpha, 1457 textures, 1458 ); 1459 1460 self.add_split_composite_instance_to_batches( 1461 key, 1462 BatchFeatures::CLIP_MASK, 1463 &prim_info.clip_chain.pic_coverage_rect, 1464 z_id, 1465 prim_header_index, 1466 extra_prim_gpu_address, 1467 ); 1468 1469 return; 1470 } 1471 Picture3DContext::Out { .. 
} => { 1472 let textures = TextureSet { 1473 colors: [ 1474 texture, 1475 TextureSource::Invalid, 1476 TextureSource::Invalid, 1477 ], 1478 }; 1479 let batch_params = BrushBatchParameters::shared( 1480 BrushBatchKind::Image(ImageBufferKind::Texture2D), 1481 textures, 1482 ImageBrushUserData { 1483 color_mode: ShaderColorMode::Image, 1484 alpha_type: AlphaType::PremultipliedAlpha, 1485 raster_space: RasterizationSpace::Screen, 1486 opacity: 1.0, 1487 }.encode(), 1488 uv_rect_address.as_int(), 1489 ); 1490 1491 let prim_header = PrimitiveHeader { 1492 specific_prim_address: prim_cache_address.as_int(), 1493 user_data: batch_params.prim_user_data, 1494 ..picture_prim_header 1495 }; 1496 let prim_header_index = prim_headers.push(&prim_header); 1497 1498 let (opacity, blend_mode) = if is_opaque { 1499 (PrimitiveOpacity::opaque(), BlendMode::None) 1500 } else { 1501 (PrimitiveOpacity::translucent(), BlendMode::PremultipliedAlpha) 1502 }; 1503 1504 self.add_segmented_prim_to_batch( 1505 None, 1506 opacity, 1507 &batch_params, 1508 blend_mode, 1509 batch_features, 1510 brush_flags, 1511 EdgeAaSegmentMask::all(), 1512 prim_header_index, 1513 bounding_rect, 1514 transform_kind, 1515 z_id, 1516 prim_info.clip_task_index, 1517 ctx, 1518 render_tasks, 1519 ); 1520 1521 return; 1522 } 1523 } 1524 } 1525 PictureCompositeMode::SVGFEGraph(..) 
=> { 1526 let kind = BatchKind::Brush( 1527 BrushBatchKind::Image(ImageBufferKind::Texture2D) 1528 ); 1529 let key = BatchKey::new( 1530 kind, 1531 blend_mode, 1532 textures, 1533 ); 1534 1535 let prim_user_data = ImageBrushUserData { 1536 color_mode: ShaderColorMode::Image, 1537 alpha_type: AlphaType::PremultipliedAlpha, 1538 raster_space: RasterizationSpace::Screen, 1539 opacity: 1.0, 1540 }.encode(); 1541 1542 (key, prim_user_data, uv_rect_address.as_int()) 1543 } 1544 }; 1545 1546 let prim_header = PrimitiveHeader { 1547 z: z_id, 1548 user_data: prim_user_data, 1549 ..picture_prim_header 1550 }; 1551 let prim_header_index = prim_headers.push(&prim_header); 1552 1553 self.add_brush_instance_to_batches( 1554 key, 1555 batch_features, 1556 bounding_rect, 1557 z_id, 1558 INVALID_SEGMENT_INDEX, 1559 EdgeAaSegmentMask::all(), 1560 clip_task_address, 1561 brush_flags, 1562 prim_header_index, 1563 resource_address, 1564 ); 1565 } 1566 None => { 1567 unreachable!(); 1568 } 1569 } 1570 1571 return; 1572 } 1573 1574 let base_prim_header = PrimitiveHeader { 1575 local_rect: prim_rect, 1576 local_clip_rect: prim_info.clip_chain.local_clip_rect, 1577 transform_id, 1578 z: z_id, 1579 render_task_address: self.batcher.render_task_address, 1580 specific_prim_address: GpuBufferAddress::INVALID.as_int(), // Will be overridden by most uses 1581 user_data: [0; 4], // Will be overridden by most uses 1582 }; 1583 1584 let common_data = ctx.data_stores.as_common_data(prim_instance); 1585 1586 let needs_blending = !common_data.opacity.is_opaque || 1587 prim_info.clip_task_index != ClipTaskIndex::INVALID || 1588 transform_kind == TransformedRectKind::Complex || 1589 is_anti_aliased; 1590 1591 let blend_mode = if needs_blending { 1592 BlendMode::PremultipliedAlpha 1593 } else { 1594 BlendMode::None 1595 }; 1596 1597 let segment_instance_index = match prim_instance.kind { 1598 PrimitiveInstanceKind::Rectangle { segment_instance_index, .. 
} 1599 | PrimitiveInstanceKind::YuvImage { segment_instance_index, .. } => segment_instance_index, 1600 _ => SegmentInstanceIndex::UNUSED, 1601 }; 1602 1603 let (prim_cache_address, segments) = if segment_instance_index == SegmentInstanceIndex::UNUSED { 1604 (common_data.gpu_buffer_address, None) 1605 } else { 1606 let segment_instance = &ctx.scratch.segment_instances[segment_instance_index]; 1607 let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]); 1608 (segment_instance.gpu_data, segments) 1609 }; 1610 1611 // The following primitives lower to the image brush shader in the same way. 1612 let img_brush_data = match prim_instance.kind { 1613 PrimitiveInstanceKind::CachedLinearGradient { data_handle, ref visible_tiles_range, .. } => { 1614 let prim_data = &ctx.data_stores.linear_grad[data_handle]; 1615 Some((prim_data.src_color, Some(visible_tiles_range), prim_data.brush_segments.as_slice())) 1616 } 1617 PrimitiveInstanceKind::RadialGradient { data_handle, ref visible_tiles_range, .. } => { 1618 let prim_data = &ctx.data_stores.radial_grad[data_handle]; 1619 Some((prim_data.src_color, Some(visible_tiles_range), prim_data.brush_segments.as_slice())) 1620 } 1621 PrimitiveInstanceKind::ConicGradient { data_handle, ref visible_tiles_range, .. } => { 1622 let prim_data = &ctx.data_stores.conic_grad[data_handle]; 1623 Some((prim_data.src_color, Some(visible_tiles_range), prim_data.brush_segments.as_slice())) 1624 } 1625 PrimitiveInstanceKind::ImageBorder { data_handle, .. 
} => { 1626 let prim_data = &ctx.data_stores.image_border[data_handle]; 1627 Some((prim_data.kind.src_color, None, prim_data.kind.brush_segments.as_slice())) 1628 } 1629 _ => None, 1630 }; 1631 1632 if let Some((src_color, visible_tiles_range, brush_segments)) = img_brush_data { 1633 let src_color = render_tasks.resolve_location(src_color); 1634 1635 let (uv_rect_address, texture_source) = match src_color { 1636 Some(src) => src, 1637 None => { 1638 return; 1639 } 1640 }; 1641 1642 let textures = TextureSet::prim_textured(texture_source); 1643 1644 let prim_user_data = ImageBrushUserData { 1645 color_mode: ShaderColorMode::Image, 1646 alpha_type: AlphaType::PremultipliedAlpha, 1647 raster_space: RasterizationSpace::Local, 1648 opacity: 1.0, 1649 }.encode(); 1650 1651 let prim_header = PrimitiveHeader { 1652 specific_prim_address: common_data.gpu_buffer_address.as_int(), 1653 user_data: prim_user_data, 1654 ..base_prim_header 1655 }; 1656 1657 let batch_kind = BrushBatchKind::Image(texture_source.image_buffer_kind()); 1658 1659 if visible_tiles_range.map_or(true, |r| r.is_empty()) { 1660 let batch_params = BrushBatchParameters::shared( 1661 batch_kind, 1662 textures, 1663 prim_user_data, 1664 uv_rect_address.as_int(), 1665 ); 1666 1667 let segments = if brush_segments.is_empty() { 1668 None 1669 } else { 1670 Some(&brush_segments[..]) 1671 }; 1672 1673 let prim_header_index = prim_headers.push(&prim_header); 1674 1675 self.add_segmented_prim_to_batch( 1676 segments, 1677 common_data.opacity, 1678 &batch_params, 1679 blend_mode, 1680 batch_features, 1681 brush_flags, 1682 common_data.edge_aa_mask, 1683 prim_header_index, 1684 bounding_rect, 1685 transform_kind, 1686 z_id, 1687 prim_info.clip_task_index, 1688 ctx, 1689 render_tasks, 1690 ); 1691 } else { 1692 let visible_tiles = &ctx.scratch.gradient_tiles[*visible_tiles_range.unwrap()]; 1693 1694 let (clip_task_address, clip_mask) = ctx.get_prim_clip_task_and_texture( 1695 prim_info.clip_task_index, 1696 
render_tasks, 1697 ).unwrap(); 1698 1699 let batch_key = BatchKey { 1700 blend_mode, 1701 kind: BatchKind::Brush(batch_kind), 1702 textures: BatchTextures { 1703 input: textures, 1704 clip_mask, 1705 }, 1706 }; 1707 1708 for tile in visible_tiles { 1709 let tile_prim_header = PrimitiveHeader { 1710 local_rect: tile.local_rect, 1711 local_clip_rect: tile.local_clip_rect, 1712 ..prim_header 1713 }; 1714 let prim_header_index = prim_headers.push(&tile_prim_header); 1715 1716 self.add_brush_instance_to_batches( 1717 batch_key, 1718 batch_features, 1719 bounding_rect, 1720 z_id, 1721 INVALID_SEGMENT_INDEX, 1722 common_data.edge_aa_mask, 1723 clip_task_address, 1724 brush_flags | BrushFlags::PERSPECTIVE_INTERPOLATION, 1725 prim_header_index, 1726 uv_rect_address.as_int(), 1727 ); 1728 } 1729 } 1730 1731 return; 1732 } 1733 1734 match prim_instance.kind { 1735 // Handled above. 1736 PrimitiveInstanceKind::Picture { .. } => {} 1737 PrimitiveInstanceKind::CachedLinearGradient { .. } => { } 1738 PrimitiveInstanceKind::RadialGradient { .. } => { } 1739 PrimitiveInstanceKind::ConicGradient { .. } => { } 1740 PrimitiveInstanceKind::ImageBorder { .. } => {} 1741 PrimitiveInstanceKind::BoxShadow { .. } => { 1742 unreachable!("BUG: Should not hit box-shadow here as they are handled by quad infra"); 1743 } 1744 PrimitiveInstanceKind::NormalBorder { data_handle, ref render_task_ids, .. } => { 1745 let prim_data = &ctx.data_stores.normal_border[data_handle]; 1746 let task_ids = &ctx.scratch.border_cache_handles[*render_task_ids]; 1747 let mut segment_data: SmallVec<[SegmentInstanceData; 8]> = SmallVec::new(); 1748 1749 // Collect the segment instance data from each render 1750 // task for each valid edge / corner of the border. 
1751 1752 for task_id in task_ids { 1753 if let Some((uv_rect_address, texture)) = render_tasks.resolve_location(*task_id) { 1754 segment_data.push( 1755 SegmentInstanceData { 1756 textures: TextureSet::prim_textured(texture), 1757 specific_resource_address: uv_rect_address.as_int(), 1758 } 1759 ); 1760 } 1761 } 1762 1763 // TODO: it would be less error-prone to get this info from the texture cache. 1764 let image_buffer_kind = ImageBufferKind::Texture2D; 1765 1766 let batch_params = BrushBatchParameters::instanced( 1767 BrushBatchKind::Image(image_buffer_kind), 1768 ImageBrushUserData { 1769 color_mode: ShaderColorMode::Image, 1770 alpha_type: AlphaType::PremultipliedAlpha, 1771 raster_space: RasterizationSpace::Local, 1772 opacity: 1.0, 1773 }.encode(), 1774 segment_data, 1775 ); 1776 1777 let prim_header = PrimitiveHeader { 1778 specific_prim_address: prim_cache_address.as_int(), 1779 user_data: batch_params.prim_user_data, 1780 ..base_prim_header 1781 }; 1782 let prim_header_index = prim_headers.push(&prim_header); 1783 1784 let border_data = &prim_data.kind; 1785 self.add_segmented_prim_to_batch( 1786 Some(border_data.brush_segments.as_slice()), 1787 common_data.opacity, 1788 &batch_params, 1789 blend_mode, 1790 batch_features, 1791 brush_flags, 1792 common_data.edge_aa_mask, 1793 prim_header_index, 1794 bounding_rect, 1795 transform_kind, 1796 z_id, 1797 prim_info.clip_task_index, 1798 ctx, 1799 render_tasks, 1800 ); 1801 } 1802 PrimitiveInstanceKind::TextRun { data_handle, run_index, .. } => { 1803 let run = &ctx.prim_store.text_runs[run_index]; 1804 let subpx_dir = run.used_font.get_subpx_dir(); 1805 1806 // The GPU cache data is stored in the template and reused across 1807 // frames and display lists. 1808 let prim_data = &ctx.data_stores.text_run[data_handle]; 1809 1810 // The local prim rect is only informative for text primitives, as 1811 // thus is not directly necessary for any drawing of the text run. 
1812 // However the glyph offsets are relative to the prim rect origin 1813 // less the unsnapped reference frame offset. We also want the 1814 // the snapped reference frame offset, because cannot recalculate 1815 // it as it ignores the animated components for the transform. As 1816 // such, we adjust the prim rect origin here, and replace the size 1817 // with the unsnapped and snapped offsets respectively. This has 1818 // the added bonus of avoiding quantization effects when storing 1819 // floats in the extra header integers. 1820 let glyph_keys = &ctx.scratch.glyph_keys[run.glyph_keys_range]; 1821 let prim_header = PrimitiveHeader { 1822 local_rect: LayoutRect { 1823 min: prim_rect.min - run.reference_frame_relative_offset, 1824 max: run.snapped_reference_frame_relative_offset.to_point(), 1825 }, 1826 specific_prim_address: prim_cache_address.as_int(), 1827 user_data: [ 1828 (run.raster_scale * 65535.0).round() as i32, 1829 0, 1830 0, 1831 0, 1832 ], 1833 ..base_prim_header 1834 }; 1835 let prim_header_index = prim_headers.push(&prim_header); 1836 let base_instance = GlyphInstance::new( 1837 prim_header_index, 1838 ); 1839 let batcher = &mut self.batcher; 1840 1841 let (clip_task_address, clip_mask_texture_id) = ctx.get_prim_clip_task_and_texture( 1842 prim_info.clip_task_index, 1843 render_tasks, 1844 ).unwrap(); 1845 1846 // The run.used_font.clone() is here instead of instead of inline in the `fetch_glyph` 1847 // function call to work around a miscompilation. 
1848 // https://github.com/rust-lang/rust/issues/80111 1849 let font = run.used_font.clone(); 1850 ctx.resource_cache.fetch_glyphs( 1851 font, 1852 &glyph_keys, 1853 &gpu_buffer_builder.f32, 1854 &mut self.glyph_fetch_buffer, 1855 |texture_id, glyph_format, glyphs| { 1856 debug_assert_ne!(texture_id, TextureSource::Invalid); 1857 1858 let subpx_dir = subpx_dir.limit_by(glyph_format); 1859 1860 let textures = BatchTextures::prim_textured( 1861 texture_id, 1862 clip_mask_texture_id, 1863 ); 1864 1865 let kind = BatchKind::TextRun(glyph_format); 1866 1867 let (blend_mode, color_mode) = match glyph_format { 1868 GlyphFormat::Subpixel | 1869 GlyphFormat::TransformedSubpixel => { 1870 debug_assert!(ctx.use_dual_source_blending); 1871 ( 1872 BlendMode::SubpixelDualSource, 1873 ShaderColorMode::SubpixelDualSource, 1874 ) 1875 } 1876 GlyphFormat::Alpha | 1877 GlyphFormat::TransformedAlpha | 1878 GlyphFormat::Bitmap => { 1879 ( 1880 BlendMode::PremultipliedAlpha, 1881 ShaderColorMode::Alpha, 1882 ) 1883 } 1884 GlyphFormat::ColorBitmap => { 1885 ( 1886 BlendMode::PremultipliedAlpha, 1887 if run.shadow { 1888 // Ignore color and only sample alpha when shadowing. 1889 ShaderColorMode::BitmapShadow 1890 } else { 1891 ShaderColorMode::ColorBitmap 1892 }, 1893 ) 1894 } 1895 }; 1896 1897 // Calculate a tighter bounding rect of just the glyphs passed to this 1898 // callback from request_glyphs(), rather than using the bounds of the 1899 // entire text run. This improves batching when glyphs are fragmented 1900 // over multiple textures in the texture cache. 1901 // This code is taken from the ps_text_run shader. 
1902 let tight_bounding_rect = { 1903 let snap_bias = match subpx_dir { 1904 SubpixelDirection::None => DeviceVector2D::new(0.5, 0.5), 1905 SubpixelDirection::Horizontal => DeviceVector2D::new(0.125, 0.5), 1906 SubpixelDirection::Vertical => DeviceVector2D::new(0.5, 0.125), 1907 }; 1908 let text_offset = prim_header.local_rect.max.to_vector(); 1909 1910 let pic_bounding_rect = if run.used_font.flags.contains(FontInstanceFlags::TRANSFORM_GLYPHS) { 1911 let mut device_bounding_rect = DeviceRect::default(); 1912 1913 let glyph_transform = ctx.spatial_tree.get_relative_transform( 1914 prim_spatial_node_index, 1915 root_spatial_node_index, 1916 ).into_transform() 1917 .with_destination::<WorldPixel>() 1918 .then(&euclid::Transform3D::from_scale(ctx.global_device_pixel_scale)); 1919 1920 let glyph_translation = DeviceVector2D::new(glyph_transform.m41, glyph_transform.m42); 1921 1922 let mut use_tight_bounding_rect = true; 1923 for glyph in glyphs { 1924 let glyph_offset = prim_data.glyphs[glyph.index_in_text_run as usize].point + prim_header.local_rect.min.to_vector(); 1925 1926 let transformed_offset = match glyph_transform.transform_point2d(glyph_offset) { 1927 Some(transformed_offset) => transformed_offset, 1928 None => { 1929 use_tight_bounding_rect = false; 1930 break; 1931 } 1932 }; 1933 let raster_glyph_offset = (transformed_offset + snap_bias).floor(); 1934 let raster_text_offset = ( 1935 glyph_transform.transform_vector2d(text_offset) + 1936 glyph_translation + 1937 DeviceVector2D::new(0.5, 0.5) 1938 ).floor() - glyph_translation; 1939 1940 let device_glyph_rect = DeviceRect::from_origin_and_size( 1941 glyph.offset + raster_glyph_offset.to_vector() + raster_text_offset, 1942 glyph.size.to_f32(), 1943 ); 1944 1945 device_bounding_rect = device_bounding_rect.union(&device_glyph_rect); 1946 } 1947 1948 if use_tight_bounding_rect { 1949 let map_device_to_surface: SpaceMapper<PicturePixel, DevicePixel> = SpaceMapper::new_with_target( 1950 root_spatial_node_index, 
1951 surface_spatial_node_index, 1952 device_bounding_rect, 1953 ctx.spatial_tree, 1954 ); 1955 1956 match map_device_to_surface.unmap(&device_bounding_rect) { 1957 Some(r) => r.intersection(bounding_rect), 1958 None => Some(*bounding_rect), 1959 } 1960 } else { 1961 Some(*bounding_rect) 1962 } 1963 } else { 1964 let mut local_bounding_rect = LayoutRect::default(); 1965 1966 let glyph_raster_scale = run.raster_scale * ctx.global_device_pixel_scale.get(); 1967 1968 for glyph in glyphs { 1969 let glyph_offset = prim_data.glyphs[glyph.index_in_text_run as usize].point + prim_header.local_rect.min.to_vector(); 1970 let glyph_scale = LayoutToDeviceScale::new(glyph_raster_scale / glyph.scale); 1971 let raster_glyph_offset = (glyph_offset * LayoutToDeviceScale::new(glyph_raster_scale) + snap_bias).floor() / glyph.scale; 1972 let local_glyph_rect = LayoutRect::from_origin_and_size( 1973 (glyph.offset + raster_glyph_offset.to_vector()) / glyph_scale + text_offset, 1974 glyph.size.to_f32() / glyph_scale, 1975 ); 1976 1977 local_bounding_rect = local_bounding_rect.union(&local_glyph_rect); 1978 } 1979 1980 let map_prim_to_surface: SpaceMapper<LayoutPixel, PicturePixel> = SpaceMapper::new_with_target( 1981 surface_spatial_node_index, 1982 prim_spatial_node_index, 1983 *bounding_rect, 1984 ctx.spatial_tree, 1985 ); 1986 map_prim_to_surface.map(&local_bounding_rect) 1987 }; 1988 1989 let intersected = match pic_bounding_rect { 1990 // The text run may have been clipped, for example if part of it is offscreen. 1991 // So intersect our result with the original bounding rect. 1992 Some(rect) => rect.intersection(bounding_rect).unwrap_or_else(PictureRect::zero), 1993 // If space mapping went off the rails, fall back to the old behavior. 1994 //TODO: consider skipping the glyph run completely in this case. 
1995 None => *bounding_rect, 1996 }; 1997 1998 intersected 1999 }; 2000 2001 let key = BatchKey::new(kind, blend_mode, textures); 2002 2003 let batch = batcher.alpha_batch_list.set_params_and_get_batch( 2004 key, 2005 batch_features, 2006 &tight_bounding_rect, 2007 z_id, 2008 ); 2009 2010 batch.reserve(glyphs.len()); 2011 for glyph in glyphs { 2012 batch.push(base_instance.build( 2013 clip_task_address, 2014 subpx_dir, 2015 glyph.index_in_text_run, 2016 glyph.uv_rect_address, 2017 color_mode, 2018 glyph.subpx_offset_x, 2019 glyph.subpx_offset_y, 2020 glyph.is_packed_glyph, 2021 )); 2022 } 2023 }, 2024 ); 2025 } 2026 PrimitiveInstanceKind::LineDecoration { ref render_task, .. } => { 2027 let (clip_task_address, clip_mask_texture_id) = ctx.get_prim_clip_task_and_texture( 2028 prim_info.clip_task_index, 2029 render_tasks, 2030 ).unwrap(); 2031 2032 let (batch_kind, textures, prim_user_data, specific_resource_address) = match render_task { 2033 Some(task_id) => { 2034 let (uv_rect_address, texture) = render_tasks.resolve_location(*task_id).unwrap(); 2035 let textures = BatchTextures::prim_textured( 2036 texture, 2037 clip_mask_texture_id, 2038 ); 2039 ( 2040 BrushBatchKind::Image(texture.image_buffer_kind()), 2041 textures, 2042 ImageBrushUserData { 2043 color_mode: ShaderColorMode::Image, 2044 alpha_type: AlphaType::PremultipliedAlpha, 2045 raster_space: RasterizationSpace::Local, 2046 opacity: 1.0, 2047 }.encode(), 2048 uv_rect_address.as_int(), 2049 ) 2050 } 2051 None => { 2052 ( 2053 BrushBatchKind::Solid, 2054 BatchTextures::prim_untextured(clip_mask_texture_id), 2055 [get_shader_opacity(1.0), 0, 0, 0], 2056 0, 2057 ) 2058 } 2059 }; 2060 2061 let prim_header = PrimitiveHeader { 2062 specific_prim_address: prim_cache_address.as_int(), 2063 user_data: prim_user_data, 2064 ..base_prim_header 2065 }; 2066 let prim_header_index = prim_headers.push(&prim_header); 2067 2068 let batch_key = BatchKey { 2069 blend_mode, 2070 kind: BatchKind::Brush(batch_kind), 2071 
textures, 2072 }; 2073 2074 self.add_brush_instance_to_batches( 2075 batch_key, 2076 batch_features, 2077 bounding_rect, 2078 z_id, 2079 INVALID_SEGMENT_INDEX, 2080 common_data.edge_aa_mask, 2081 clip_task_address, 2082 brush_flags | BrushFlags::PERSPECTIVE_INTERPOLATION, 2083 prim_header_index, 2084 specific_resource_address, 2085 ); 2086 } 2087 PrimitiveInstanceKind::Rectangle { use_legacy_path, .. } => { 2088 debug_assert!(use_legacy_path); 2089 let batch_params = BrushBatchParameters::shared( 2090 BrushBatchKind::Solid, 2091 TextureSet::UNTEXTURED, 2092 [get_shader_opacity(1.0), 0, 0, 0], 2093 0, 2094 ); 2095 2096 let prim_header = PrimitiveHeader { 2097 specific_prim_address: prim_cache_address.as_int(), 2098 user_data: batch_params.prim_user_data, 2099 ..base_prim_header 2100 }; 2101 let prim_header_index = prim_headers.push(&prim_header); 2102 2103 self.add_segmented_prim_to_batch( 2104 segments, 2105 common_data.opacity, 2106 &batch_params, 2107 blend_mode, 2108 batch_features, 2109 brush_flags, 2110 common_data.edge_aa_mask, 2111 prim_header_index, 2112 bounding_rect, 2113 transform_kind, 2114 z_id, 2115 prim_info.clip_task_index, 2116 ctx, 2117 render_tasks, 2118 ); 2119 } 2120 PrimitiveInstanceKind::YuvImage { data_handle, segment_instance_index, compositor_surface_kind, .. } => { 2121 if compositor_surface_kind.needs_cutout() { 2122 self.add_compositor_surface_cutout( 2123 prim_rect, 2124 prim_info.clip_chain.local_clip_rect, 2125 prim_info.clip_task_index, 2126 transform_id, 2127 z_id, 2128 bounding_rect, 2129 ctx, 2130 render_tasks, 2131 prim_headers, 2132 ); 2133 2134 return; 2135 } 2136 2137 let yuv_image_data = &ctx.data_stores.yuv_image[data_handle].kind; 2138 let mut textures = TextureSet::UNTEXTURED; 2139 let mut uv_rect_addresses = [0; 3]; 2140 2141 //yuv channel 2142 let channel_count = yuv_image_data.format.get_plane_num(); 2143 debug_assert!(channel_count <= 3); 2144 for channel in 0 .. 
channel_count { 2145 2146 let src_channel = render_tasks.resolve_location(yuv_image_data.src_yuv[channel]); 2147 2148 let (uv_rect_address, texture_source) = match src_channel { 2149 Some(src) => src, 2150 None => { 2151 warn!("Warnings: skip a PrimitiveKind::YuvImage"); 2152 return; 2153 } 2154 }; 2155 2156 textures.colors[channel] = texture_source; 2157 uv_rect_addresses[channel] = uv_rect_address.as_int(); 2158 } 2159 2160 // All yuv textures should be the same type. 2161 let buffer_kind = textures.colors[0].image_buffer_kind(); 2162 assert!( 2163 textures.colors[1 .. yuv_image_data.format.get_plane_num()] 2164 .iter() 2165 .all(|&tid| buffer_kind == tid.image_buffer_kind()) 2166 ); 2167 2168 let kind = BrushBatchKind::YuvImage( 2169 buffer_kind, 2170 yuv_image_data.format, 2171 yuv_image_data.color_depth, 2172 yuv_image_data.color_space, 2173 yuv_image_data.color_range, 2174 ); 2175 2176 let batch_params = BrushBatchParameters::shared( 2177 kind, 2178 textures, 2179 [ 2180 uv_rect_addresses[0], 2181 uv_rect_addresses[1], 2182 uv_rect_addresses[2], 2183 0, 2184 ], 2185 0, 2186 ); 2187 2188 debug_assert_ne!(segment_instance_index, SegmentInstanceIndex::INVALID); 2189 2190 let prim_header = PrimitiveHeader { 2191 specific_prim_address: prim_cache_address.as_int(), 2192 user_data: batch_params.prim_user_data, 2193 ..base_prim_header 2194 }; 2195 let prim_header_index = prim_headers.push(&prim_header); 2196 2197 self.add_segmented_prim_to_batch( 2198 segments, 2199 common_data.opacity, 2200 &batch_params, 2201 blend_mode, 2202 batch_features, 2203 brush_flags, 2204 common_data.edge_aa_mask, 2205 prim_header_index, 2206 bounding_rect, 2207 transform_kind, 2208 z_id, 2209 prim_info.clip_task_index, 2210 ctx, 2211 render_tasks, 2212 ); 2213 } 2214 PrimitiveInstanceKind::Image { data_handle, image_instance_index, compositor_surface_kind, .. 
} => { 2215 if compositor_surface_kind.needs_cutout() { 2216 self.add_compositor_surface_cutout( 2217 prim_rect, 2218 prim_info.clip_chain.local_clip_rect, 2219 prim_info.clip_task_index, 2220 transform_id, 2221 z_id, 2222 bounding_rect, 2223 ctx, 2224 render_tasks, 2225 prim_headers, 2226 ); 2227 2228 return; 2229 } 2230 2231 let image_data = &ctx.data_stores.image[data_handle].kind; 2232 let image_instance = &ctx.prim_store.images[image_instance_index]; 2233 let prim_user_data = ImageBrushUserData { 2234 color_mode: ShaderColorMode::Image, 2235 alpha_type: image_data.alpha_type, 2236 raster_space: RasterizationSpace::Local, 2237 opacity: 1.0, 2238 }.encode(); 2239 2240 let blend_mode = if needs_blending { 2241 match image_data.alpha_type { 2242 AlphaType::PremultipliedAlpha => BlendMode::PremultipliedAlpha, 2243 AlphaType::Alpha => BlendMode::Alpha, 2244 } 2245 } else { 2246 BlendMode::None 2247 }; 2248 2249 if image_instance.visible_tiles.is_empty() { 2250 if cfg!(debug_assertions) { 2251 match ctx.resource_cache.get_image_properties(image_data.key) { 2252 Some(ImageProperties { tiling: None, .. }) | None => (), 2253 other => panic!("Non-tiled image with no visible images detected! 
Properties {:?}", other), 2254 } 2255 } 2256 2257 let src_color = render_tasks.resolve_location(image_instance.src_color); 2258 2259 let (uv_rect_address, texture_source) = match src_color { 2260 Some(src) => src, 2261 None => { 2262 return; 2263 } 2264 }; 2265 2266 let batch_params = BrushBatchParameters::shared( 2267 BrushBatchKind::Image(texture_source.image_buffer_kind()), 2268 TextureSet::prim_textured(texture_source), 2269 prim_user_data, 2270 uv_rect_address.as_int(), 2271 ); 2272 2273 debug_assert_ne!(image_instance.segment_instance_index, SegmentInstanceIndex::INVALID); 2274 let (prim_cache_address, segments) = if image_instance.segment_instance_index == SegmentInstanceIndex::UNUSED { 2275 (prim_cache_address, None) 2276 } else { 2277 let segment_instance = &ctx.scratch.segment_instances[image_instance.segment_instance_index]; 2278 let segments = Some(&ctx.scratch.segments[segment_instance.segments_range]); 2279 (segment_instance.gpu_data, segments) 2280 }; 2281 2282 let local_rect = image_instance.adjustment.map_local_rect(&prim_rect); 2283 let local_clip_rect = image_instance.tight_local_clip_rect 2284 .intersection_unchecked(&local_rect); 2285 2286 let prim_header = PrimitiveHeader { 2287 local_rect, 2288 local_clip_rect, 2289 specific_prim_address: prim_cache_address.as_int(), 2290 user_data: batch_params.prim_user_data, 2291 ..base_prim_header 2292 }; 2293 2294 let prim_header_index = prim_headers.push(&prim_header); 2295 2296 let brush_flags = match image_instance.normalized_uvs { 2297 true => brush_flags | BrushFlags::NORMALIZED_UVS, 2298 false => brush_flags, 2299 }; 2300 2301 self.add_segmented_prim_to_batch( 2302 segments, 2303 common_data.opacity, 2304 &batch_params, 2305 blend_mode, 2306 batch_features, 2307 brush_flags, 2308 common_data.edge_aa_mask, 2309 prim_header_index, 2310 bounding_rect, 2311 transform_kind, 2312 z_id, 2313 prim_info.clip_task_index, 2314 ctx, 2315 render_tasks, 2316 ); 2317 } else { 2318 const VECS_PER_SPECIFIC_BRUSH: 
usize = 3; 2319 let max_tiles_per_header = (MAX_VERTEX_TEXTURE_WIDTH - VECS_PER_SPECIFIC_BRUSH) / VECS_PER_SEGMENT; 2320 2321 let (clip_task_address, clip_mask_texture_id) = ctx.get_prim_clip_task_and_texture( 2322 prim_info.clip_task_index, 2323 render_tasks, 2324 ).unwrap(); 2325 2326 // use temporary block storage since we don't know the number of visible tiles beforehand 2327 let mut gpu_blocks = Vec::<GpuBufferBlockF>::with_capacity(3 + max_tiles_per_header * 2); 2328 for chunk in image_instance.visible_tiles.chunks(max_tiles_per_header) { 2329 gpu_blocks.clear(); 2330 gpu_blocks.push(image_data.color.premultiplied().into()); //color 2331 gpu_blocks.push(PremultipliedColorF::WHITE.into()); //bg color 2332 gpu_blocks.push([-1.0, 0.0, 0.0, 0.0].into()); //stretch size 2333 // negative first value makes the shader code ignore it and use the local size instead 2334 for tile in chunk { 2335 let tile_rect = tile.local_rect.translate(-prim_rect.min.to_vector()); 2336 gpu_blocks.push(tile_rect.into()); 2337 gpu_blocks.push([0.0; 4].into()); 2338 } 2339 2340 let mut writer = gpu_buffer_builder.f32.write_blocks(gpu_blocks.len()); 2341 for block in &gpu_blocks { 2342 writer.push_one(*block); 2343 } 2344 let specific_prim_address = writer.finish(); 2345 2346 let prim_header = PrimitiveHeader { 2347 local_clip_rect: image_instance.tight_local_clip_rect, 2348 specific_prim_address: specific_prim_address.as_int(), 2349 user_data: prim_user_data, 2350 ..base_prim_header 2351 }; 2352 let prim_header_index = prim_headers.push(&prim_header); 2353 2354 for (i, tile) in chunk.iter().enumerate() { 2355 let (uv_rect_address, texture) = match render_tasks.resolve_location(tile.src_color) { 2356 Some(result) => result, 2357 None => { 2358 return; 2359 } 2360 }; 2361 2362 let textures = BatchTextures::prim_textured( 2363 texture, 2364 clip_mask_texture_id, 2365 ); 2366 2367 let batch_key = BatchKey { 2368 blend_mode, 2369 kind: 
BatchKind::Brush(BrushBatchKind::Image(texture.image_buffer_kind())), 2370 textures, 2371 }; 2372 2373 self.add_brush_instance_to_batches( 2374 batch_key, 2375 batch_features, 2376 bounding_rect, 2377 z_id, 2378 i as i32, 2379 tile.edge_flags, 2380 clip_task_address, 2381 brush_flags | BrushFlags::SEGMENT_RELATIVE | BrushFlags::PERSPECTIVE_INTERPOLATION, 2382 prim_header_index, 2383 uv_rect_address.as_int(), 2384 ); 2385 } 2386 } 2387 } 2388 } 2389 PrimitiveInstanceKind::LinearGradient { data_handle, ref visible_tiles_range, .. } => { 2390 let prim_data = &ctx.data_stores.linear_grad[data_handle]; 2391 2392 let user_data = [extra_prim_gpu_address.unwrap(), 0, 0, 0]; 2393 2394 if visible_tiles_range.is_empty() { 2395 let batch_params = BrushBatchParameters::shared( 2396 BrushBatchKind::LinearGradient, 2397 TextureSet::UNTEXTURED, 2398 user_data, 2399 0, 2400 ); 2401 2402 let prim_header = PrimitiveHeader { 2403 user_data: user_data, 2404 specific_prim_address: prim_data.gpu_buffer_address.as_int(), 2405 ..base_prim_header 2406 }; 2407 let prim_header_index = prim_headers.push(&prim_header); 2408 2409 let segments = if prim_data.brush_segments.is_empty() { 2410 None 2411 } else { 2412 Some(prim_data.brush_segments.as_slice()) 2413 }; 2414 self.add_segmented_prim_to_batch( 2415 segments, 2416 prim_data.opacity, 2417 &batch_params, 2418 blend_mode, 2419 batch_features, 2420 brush_flags, 2421 prim_data.edge_aa_mask, 2422 prim_header_index, 2423 bounding_rect, 2424 transform_kind, 2425 z_id, 2426 prim_info.clip_task_index, 2427 ctx, 2428 render_tasks, 2429 ); 2430 } else { 2431 let visible_tiles = &ctx.scratch.gradient_tiles[*visible_tiles_range]; 2432 2433 let (clip_task_address, clip_mask_texture_id) = ctx.get_prim_clip_task_and_texture( 2434 prim_info.clip_task_index, 2435 render_tasks, 2436 ).unwrap(); 2437 2438 let key = BatchKey { 2439 blend_mode, 2440 kind: BatchKind::Brush(BrushBatchKind::LinearGradient), 2441 textures: 
BatchTextures::prim_untextured(clip_mask_texture_id), 2442 }; 2443 2444 for tile in visible_tiles { 2445 let tile_prim_header = PrimitiveHeader { 2446 specific_prim_address: tile.address.as_int(), 2447 local_rect: tile.local_rect, 2448 local_clip_rect: tile.local_clip_rect, 2449 user_data: user_data, 2450 ..base_prim_header 2451 }; 2452 let prim_header_index = prim_headers.push(&tile_prim_header); 2453 2454 self.add_brush_instance_to_batches( 2455 key, 2456 batch_features, 2457 bounding_rect, 2458 z_id, 2459 INVALID_SEGMENT_INDEX, 2460 prim_data.edge_aa_mask, 2461 clip_task_address, 2462 brush_flags | BrushFlags::PERSPECTIVE_INTERPOLATION, 2463 prim_header_index, 2464 0, 2465 ); 2466 } 2467 } 2468 } 2469 PrimitiveInstanceKind::BackdropCapture { .. } => {} 2470 PrimitiveInstanceKind::BackdropRender { pic_index, .. } => { 2471 let blend_mode = BlendMode::PremultipliedAlpha; 2472 let pic_task_id = ctx.prim_store.pictures[pic_index.0].primary_render_task_id; 2473 2474 let (clip_task_address, clip_mask_texture_id) = ctx.get_prim_clip_task_and_texture( 2475 prim_info.clip_task_index, 2476 render_tasks, 2477 ).unwrap(); 2478 2479 let kind = BatchKind::Brush( 2480 BrushBatchKind::Image(ImageBufferKind::Texture2D) 2481 ); 2482 let (_, texture) = render_tasks.resolve_location(pic_task_id).unwrap(); 2483 let textures = BatchTextures::prim_textured( 2484 texture, 2485 clip_mask_texture_id, 2486 ); 2487 let key = BatchKey::new( 2488 kind, 2489 blend_mode, 2490 textures, 2491 ); 2492 2493 let prim_header = PrimitiveHeader { 2494 specific_prim_address: ctx.globals.default_image_data.as_int(), 2495 user_data: ImageBrushUserData { 2496 color_mode: ShaderColorMode::Image, 2497 alpha_type: AlphaType::PremultipliedAlpha, 2498 raster_space: RasterizationSpace::Screen, 2499 opacity: 1.0, 2500 }.encode(), 2501 ..base_prim_header 2502 }; 2503 let prim_header_index = prim_headers.push(&prim_header); 2504 2505 let pic_task = &render_tasks[pic_task_id.unwrap()]; 2506 let pic_info = match 
pic_task.kind { 2507 RenderTaskKind::Picture(ref info) => info, 2508 _ => panic!("bug: not a picture"), 2509 }; 2510 let target_rect = pic_task.get_target_rect(); 2511 2512 let backdrop_rect = DeviceRect::from_origin_and_size( 2513 pic_info.content_origin, 2514 target_rect.size().to_f32(), 2515 ); 2516 2517 let map_prim_to_backdrop = SpaceMapper::new_with_target( 2518 pic_info.surface_spatial_node_index, 2519 prim_spatial_node_index, 2520 WorldRect::max_rect(), 2521 ctx.spatial_tree, 2522 ); 2523 2524 let points = [ 2525 map_prim_to_backdrop.map_point(prim_rect.top_left()), 2526 map_prim_to_backdrop.map_point(prim_rect.top_right()), 2527 map_prim_to_backdrop.map_point(prim_rect.bottom_left()), 2528 map_prim_to_backdrop.map_point(prim_rect.bottom_right()), 2529 ]; 2530 2531 if points.iter().any(|p| p.is_none()) { 2532 return; 2533 } 2534 2535 let uvs = [ 2536 calculate_screen_uv(points[0].unwrap() * pic_info.device_pixel_scale, backdrop_rect), 2537 calculate_screen_uv(points[1].unwrap() * pic_info.device_pixel_scale, backdrop_rect), 2538 calculate_screen_uv(points[2].unwrap() * pic_info.device_pixel_scale, backdrop_rect), 2539 calculate_screen_uv(points[3].unwrap() * pic_info.device_pixel_scale, backdrop_rect), 2540 ]; 2541 2542 let source = ImageSource { 2543 p0: target_rect.min.to_f32(), 2544 p1: target_rect.max.to_f32(), 2545 user_data: [0.0; 4], 2546 uv_rect_kind: UvRectKind::Quad { 2547 top_left: uvs[0], 2548 top_right: uvs[1], 2549 bottom_left: uvs[2], 2550 bottom_right: uvs[3], 2551 }, 2552 }; 2553 2554 let uv_rect_handle = source.write_gpu_blocks(&mut gpu_buffer_builder.f32); 2555 let uv_rect_address = gpu_buffer_builder.f32.resolve_handle(uv_rect_handle); 2556 2557 self.add_brush_instance_to_batches( 2558 key, 2559 batch_features, 2560 bounding_rect, 2561 z_id, 2562 INVALID_SEGMENT_INDEX, 2563 EdgeAaSegmentMask::all(), 2564 clip_task_address, 2565 brush_flags, 2566 prim_header_index, 2567 uv_rect_address.as_int(), 2568 ); 2569 } 2570 } 2571 } 2572 2573 /// 
Draw a (potentially masked) alpha cutout so that a video underlay will be blended
    /// through by the compositor
    fn add_compositor_surface_cutout(
        &mut self,
        prim_rect: LayoutRect,
        local_clip_rect: LayoutRect,
        clip_task_index: ClipTaskIndex,
        transform_id: TransformPaletteId,
        z_id: ZBufferId,
        bounding_rect: &PictureRect,
        ctx: &RenderTargetContext,
        render_tasks: &RenderTaskGraph,
        prim_headers: &mut PrimitiveHeaders,
    ) {
        // The cutout always has a clip task allocated; resolve its address
        // and the mask texture it was rendered into.
        let (clip_task_address, clip_mask_texture) = ctx
            .get_prim_clip_task_and_texture(clip_task_index, render_tasks)
            .unwrap();

        // A solid brush drawn with dest-out blending punches a hole in the
        // alpha channel so the compositor surface underneath shows through.
        let key = BatchKey::new(
            BatchKind::Brush(BrushBatchKind::Solid),
            BlendMode::PremultipliedDestOut,
            BatchTextures::prim_untextured(clip_mask_texture),
        );

        // The primitive itself is just a black rect covering the prim area.
        let header = PrimitiveHeader {
            local_rect: prim_rect,
            local_clip_rect,
            specific_prim_address: ctx.globals.default_black_rect_address.as_int(),
            transform_id,
            z: z_id,
            render_task_address: self.batcher.render_task_address,
            user_data: [get_shader_opacity(1.0), 0, 0, 0],
        };
        let header_index = prim_headers.push(&header);

        self.add_brush_instance_to_batches(
            key,
            BatchFeatures::ALPHA_PASS | BatchFeatures::CLIP_MASK,
            bounding_rect,
            z_id,
            INVALID_SEGMENT_INDEX,
            EdgeAaSegmentMask::empty(),
            clip_task_address,
            BrushFlags::empty(),
            header_index,
            0,
        );
    }

    /// Add a single segment instance to a batch.
    ///
    /// `edge_aa_mask` Specifies the edges that are *allowed* to have anti-aliasing, if and only
    /// if the segments enable it.
    /// In other words passing EdgeAaSegmentFlags::all() does not necessarily mean all edges will
2629 fn add_segment_to_batch( 2630 &mut self, 2631 segment: &BrushSegment, 2632 segment_data: &SegmentInstanceData, 2633 segment_index: i32, 2634 batch_kind: BrushBatchKind, 2635 prim_header_index: PrimitiveHeaderIndex, 2636 alpha_blend_mode: BlendMode, 2637 features: BatchFeatures, 2638 brush_flags: BrushFlags, 2639 edge_aa_mask: EdgeAaSegmentMask, 2640 bounding_rect: &PictureRect, 2641 transform_kind: TransformedRectKind, 2642 z_id: ZBufferId, 2643 prim_opacity: PrimitiveOpacity, 2644 clip_task_index: ClipTaskIndex, 2645 ctx: &RenderTargetContext, 2646 render_tasks: &RenderTaskGraph, 2647 ) { 2648 debug_assert!(clip_task_index != ClipTaskIndex::INVALID); 2649 2650 // Get GPU address of clip task for this segment, or None if 2651 // the entire segment is clipped out. 2652 if let Some((clip_task_address, clip_mask)) = ctx.get_clip_task_and_texture( 2653 clip_task_index, 2654 segment_index, 2655 render_tasks, 2656 ) { 2657 // If a got a valid (or OPAQUE) clip task address, add the segment. 2658 let is_inner = segment.edge_flags.is_empty(); 2659 let needs_blending = !prim_opacity.is_opaque || 2660 clip_task_address != OPAQUE_TASK_ADDRESS || 2661 (!is_inner && transform_kind == TransformedRectKind::Complex) || 2662 brush_flags.contains(BrushFlags::FORCE_AA); 2663 2664 let textures = BatchTextures { 2665 input: segment_data.textures, 2666 clip_mask, 2667 }; 2668 2669 let batch_key = BatchKey { 2670 blend_mode: if needs_blending { alpha_blend_mode } else { BlendMode::None }, 2671 kind: BatchKind::Brush(batch_kind), 2672 textures, 2673 }; 2674 2675 self.add_brush_instance_to_batches( 2676 batch_key, 2677 features, 2678 bounding_rect, 2679 z_id, 2680 segment_index, 2681 segment.edge_flags & edge_aa_mask, 2682 clip_task_address, 2683 brush_flags | BrushFlags::PERSPECTIVE_INTERPOLATION | segment.brush_flags, 2684 prim_header_index, 2685 segment_data.specific_resource_address, 2686 ); 2687 } 2688 } 2689 2690 /// Add any segment(s) from a brush to batches. 
    ///
    /// `edge_aa_mask` Specifies the edges that are *allowed* to have anti-aliasing, if and only
    /// if the segments enable it.
    /// In other words passing EdgeAaSegmentFlags::all() does not necessarily mean all edges will
    /// be anti-aliased, only that they could be.
    ///
    /// Dispatches on whether the brush has an explicit segment list, and
    /// whether the per-segment instance data is shared or per-segment.
    fn add_segmented_prim_to_batch(
        &mut self,
        brush_segments: Option<&[BrushSegment]>,
        prim_opacity: PrimitiveOpacity,
        params: &BrushBatchParameters,
        blend_mode: BlendMode,
        features: BatchFeatures,
        brush_flags: BrushFlags,
        edge_aa_mask: EdgeAaSegmentMask,
        prim_header_index: PrimitiveHeaderIndex,
        bounding_rect: &PictureRect,
        transform_kind: TransformedRectKind,
        z_id: ZBufferId,
        clip_task_index: ClipTaskIndex,
        ctx: &RenderTargetContext,
        render_tasks: &RenderTaskGraph,
    ) {
        match (brush_segments, &params.segment_data) {
            (Some(ref brush_segments), SegmentDataKind::Instanced(ref segment_data)) => {
                // In this case, we have both a list of segments, and a list of
                // per-segment instance data. Zip them together to build batches.
                debug_assert_eq!(brush_segments.len(), segment_data.len());
                for (segment_index, (segment, segment_data)) in brush_segments
                    .iter()
                    .zip(segment_data.iter())
                    .enumerate()
                {
                    self.add_segment_to_batch(
                        segment,
                        segment_data,
                        segment_index as i32,
                        params.batch_kind,
                        prim_header_index,
                        blend_mode,
                        features,
                        brush_flags,
                        edge_aa_mask,
                        bounding_rect,
                        transform_kind,
                        z_id,
                        prim_opacity,
                        clip_task_index,
                        ctx,
                        render_tasks,
                    );
                }
            }
            (Some(ref brush_segments), SegmentDataKind::Shared(ref segment_data)) => {
                // A list of segments, but the per-segment data is common
                // between all segments.
                for (segment_index, segment) in brush_segments
                    .iter()
                    .enumerate()
                {
                    self.add_segment_to_batch(
                        segment,
                        segment_data,
                        segment_index as i32,
                        params.batch_kind,
                        prim_header_index,
                        blend_mode,
                        features,
                        brush_flags,
                        edge_aa_mask,
                        bounding_rect,
                        transform_kind,
                        z_id,
                        prim_opacity,
                        clip_task_index,
                        ctx,
                        render_tasks,
                    );
                }
            }
            (None, SegmentDataKind::Shared(ref segment_data)) => {
                // No segments, and thus no per-segment instance data.
                // Note: the blend mode already takes opacity into account

                // Unsegmented prims always have a clip task entry, so the
                // unwrap here cannot fail.
                let (clip_task_address, clip_mask) = ctx.get_prim_clip_task_and_texture(
                    clip_task_index,
                    render_tasks,
                ).unwrap();

                let textures = BatchTextures {
                    input: segment_data.textures,
                    clip_mask,
                };

                let batch_key = BatchKey {
                    blend_mode,
                    kind: BatchKind::Brush(params.batch_kind),
                    textures,
                };

                self.add_brush_instance_to_batches(
                    batch_key,
                    features,
                    bounding_rect,
                    z_id,
                    INVALID_SEGMENT_INDEX,
                    edge_aa_mask,
                    clip_task_address,
                    brush_flags | BrushFlags::PERSPECTIVE_INTERPOLATION,
                    prim_header_index,
                    segment_data.specific_resource_address,
                );
            }
            (None, SegmentDataKind::Instanced(..)) => {
                // We should never hit the case where there are no segments,
                // but a list of segment instance data.
                unreachable!();
            }
        }
    }
}

/// Either a single texture / user data for all segments,
/// or a list of one per segment.
enum SegmentDataKind {
    Shared(SegmentInstanceData),
    Instanced(SmallVec<[SegmentInstanceData; 8]>),
}

/// The parameters that are specific to a kind of brush,
/// used by the common method to add a brush to batches.
struct BrushBatchParameters {
    /// Which brush batch (solid, image, gradient, ...) these parameters feed.
    batch_kind: BrushBatchKind,
    /// Per-primitive user data forwarded via the primitive header.
    prim_user_data: [i32; 4],
    /// Texture / resource address data; shared across all segments or one
    /// entry per segment.
    segment_data: SegmentDataKind,
}

impl BrushBatchParameters {
    /// This brush instance has a list of per-segment
    /// instance data.
    fn instanced(
        batch_kind: BrushBatchKind,
        prim_user_data: [i32; 4],
        segment_data: SmallVec<[SegmentInstanceData; 8]>,
    ) -> Self {
        BrushBatchParameters {
            batch_kind,
            prim_user_data,
            segment_data: SegmentDataKind::Instanced(segment_data),
        }
    }

    /// This brush instance shares the per-segment data
    /// across all segments.
    fn shared(
        batch_kind: BrushBatchKind,
        textures: TextureSet,
        prim_user_data: [i32; 4],
        specific_resource_address: i32,
    ) -> Self {
        BrushBatchParameters {
            batch_kind,
            prim_user_data,
            segment_data: SegmentDataKind::Shared(
                SegmentInstanceData {
                    textures,
                    specific_resource_address,
                }
            ),
        }
    }
}

/// A list of clip instances to be drawn into a target.
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ClipMaskInstanceList {
    pub mask_instances_fast: FrameVec<MaskInstance>,
    pub mask_instances_slow: FrameVec<MaskInstance>,

    // Variants of the above, bucketed by the scissor rect they are drawn under.
    pub mask_instances_fast_with_scissor: FastHashMap<DeviceIntRect, FrameVec<MaskInstance>>,
    pub mask_instances_slow_with_scissor: FastHashMap<DeviceIntRect, FrameVec<MaskInstance>>,

    // Image-mask instances, bucketed by source texture (and scissor rect below).
    pub image_mask_instances: FastHashMap<TextureSource, FrameVec<PrimitiveInstanceData>>,
    pub image_mask_instances_with_scissor: FastHashMap<(DeviceIntRect, TextureSource), FrameVec<PrimitiveInstanceData>>,
}

impl ClipMaskInstanceList {
    /// Construct an empty instance list whose vectors allocate from the
    /// per-frame memory allocator.
    pub fn new(memory: &FrameMemory) -> Self {
        ClipMaskInstanceList {
            mask_instances_fast: memory.new_vec(),
            mask_instances_slow: memory.new_vec(),
            mask_instances_fast_with_scissor: FastHashMap::default(),
            mask_instances_slow_with_scissor: FastHashMap::default(),
            image_mask_instances: FastHashMap::default(),
            image_mask_instances_with_scissor: FastHashMap::default(),
        }
    }

    /// Returns true if no instances of any kind have been added.
    pub fn is_empty(&self) -> bool {
        // Destructure self to make sure we don't forget to update this method if
        // a new member is added.
        let ClipMaskInstanceList {
            mask_instances_fast,
            mask_instances_slow,
            mask_instances_fast_with_scissor,
            mask_instances_slow_with_scissor,
            image_mask_instances,
            image_mask_instances_with_scissor,
        } = self;

        mask_instances_fast.is_empty()
            && mask_instances_slow.is_empty()
            && mask_instances_fast_with_scissor.is_empty()
            && mask_instances_slow_with_scissor.is_empty()
            && image_mask_instances.is_empty()
            && image_mask_instances_with_scissor.is_empty()
    }
}

/// A list of clip instances to be drawn into a target.
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ClipBatchList {
    /// Rectangle draws fill up the rectangles with rounded corners.
    pub slow_rectangles: FrameVec<ClipMaskInstanceRect>,
    // Rectangle clips batched separately from `slow_rectangles`;
    // NOTE(review): presumably these take a cheaper shader path — confirm
    // against the shader selection in the renderer.
    pub fast_rectangles: FrameVec<ClipMaskInstanceRect>,
    // Box-shadow clip instances, bucketed by the source texture of the
    // rendered shadow task.
    pub box_shadows: FastHashMap<TextureSource, FrameVec<ClipMaskInstanceBoxShadow>>,
}

impl ClipBatchList {
    /// Construct an empty batch list using the per-frame allocator.
    fn new(memory: &FrameMemory) -> Self {
        ClipBatchList {
            slow_rectangles: memory.new_vec(),
            fast_rectangles: memory.new_vec(),
            box_shadows: FastHashMap::default(),
        }
    }

    /// Returns true if no clip instances of any kind have been added.
    pub fn is_empty(&self) -> bool {
        self.slow_rectangles.is_empty()
            && self.fast_rectangles.is_empty()
            && self.box_shadows.is_empty()
    }
}

/// Batcher managing draw calls into the clip mask (in the RT cache).
#[derive(Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct ClipBatcher {
    /// The first clip in each clip task. This will overwrite all pixels
    /// in the clip region, so we can skip doing a clear and write with
    /// blending disabled, which is a big performance win on Intel GPUs.
    pub primary_clips: ClipBatchList,
    /// Any subsequent clip masks (rare) for a clip task get drawn in
    /// a second pass with multiplicative blending enabled.
    pub secondary_clips: ClipBatchList,

    // Whether the GPU can clear regions quickly; affects which list the
    // first clip of a task is batched into (see `get_batch_list`).
    gpu_supports_fast_clears: bool,
}

impl ClipBatcher {
    /// Construct a clip batcher with empty primary/secondary batch lists.
    pub fn new(
        gpu_supports_fast_clears: bool,
        memory: &FrameMemory,
    ) -> Self {
        ClipBatcher {
            primary_clips: ClipBatchList::new(memory),
            secondary_clips: ClipBatchList::new(memory),
            gpu_supports_fast_clears,
        }
    }

    /// Add a clip region rectangle, drawn with identity transforms, to the
    /// primary (first-clip) batch list.
    pub fn add_clip_region(
        &mut self,
        local_pos: LayoutPoint,
        sub_rect: DeviceRect,
        clip_data: ClipData,
        task_origin: DevicePoint,
        screen_origin: DevicePoint,
        device_pixel_scale: f32,
    ) {
        let instance = ClipMaskInstanceRect {
            common: ClipMaskInstanceCommon {
                clip_transform_id: TransformPaletteId::IDENTITY,
                prim_transform_id: TransformPaletteId::IDENTITY,
                sub_rect,
                task_origin,
                screen_origin,
                device_pixel_scale,
            },
            local_pos,
            clip_data,
        };

        self.primary_clips.slow_rectangles.push(instance);
    }

    /// Where appropriate, draw a clip rectangle as a small series of tiles,
    /// instead of one large rectangle.
    ///
    /// Returns true if the clip was handled here (tiled), false if the
    /// caller should fall back to drawing the entire mask in one draw.
    fn add_tiled_clip_mask(
        &mut self,
        mask_screen_rect: DeviceRect,
        local_clip_rect: LayoutRect,
        clip_spatial_node_index: SpatialNodeIndex,
        spatial_tree: &SpatialTree,
        world_rect: &WorldRect,
        global_device_pixel_scale: DevicePixelScale,
        common: &ClipMaskInstanceCommon,
        is_first_clip: bool,
    ) -> bool {
        // Only try to draw in tiles if the clip mask is big enough.
        if mask_screen_rect.area() < CLIP_RECTANGLE_AREA_THRESHOLD {
            return false;
        }

        let mask_screen_rect_size = mask_screen_rect.size().to_i32();
        let clip_spatial_node = spatial_tree.get_spatial_node(clip_spatial_node_index);

        // Only support clips that are axis-aligned to the root coordinate space,
        // for now, to simplify the logic below. This handles the vast majority
        // of real world cases, but could be expanded in future if needed.
        if clip_spatial_node.coordinate_system_id != CoordinateSystemId::root() {
            return false;
        }

        // Get the world rect of the clip rectangle. If we can't transform it due
        // to the matrix, just fall back to drawing the entire clip mask.
        let transform = spatial_tree.get_world_transform(
            clip_spatial_node_index,
        );
        let world_clip_rect = match project_rect(
            &transform.into_transform(),
            &local_clip_rect,
            &world_rect,
        ) {
            Some(rect) => rect,
            None => return false,
        };

        // Work out how many tiles to draw this clip mask in, stretched across the
        // device rect of the primitive clip mask.
        let world_device_rect = world_clip_rect * global_device_pixel_scale;
        // Round up so partial tiles at the right/bottom edges are covered.
        let x_tiles = (mask_screen_rect_size.width + CLIP_RECTANGLE_TILE_SIZE-1) / CLIP_RECTANGLE_TILE_SIZE;
        let y_tiles = (mask_screen_rect_size.height + CLIP_RECTANGLE_TILE_SIZE-1) / CLIP_RECTANGLE_TILE_SIZE;

        // Because we only run this code path for axis-aligned rects (the root coord system check above),
        // and only for rectangles (not rounded etc), the world_device_rect is not conservative - we know
        // that there is no inner_rect, and the world_device_rect should be the real, axis-aligned clip rect.
        let mask_origin = mask_screen_rect.min.to_vector();
        let clip_list = self.get_batch_list(is_first_clip);

        for y in 0 .. y_tiles {
            for x in 0 .. x_tiles {
                let p0 = DeviceIntPoint::new(
                    x * CLIP_RECTANGLE_TILE_SIZE,
                    y * CLIP_RECTANGLE_TILE_SIZE,
                );
                // Clamp the tile extent to the mask rect.
                let p1 = DeviceIntPoint::new(
                    (p0.x + CLIP_RECTANGLE_TILE_SIZE).min(mask_screen_rect_size.width),
                    (p0.y + CLIP_RECTANGLE_TILE_SIZE).min(mask_screen_rect_size.height),
                );
                let normalized_sub_rect = DeviceIntRect {
                    min: p0,
                    max: p1,
                }.to_f32();
                let world_sub_rect = normalized_sub_rect.translate(mask_origin);

                // If the clip rect completely contains this tile rect, then drawing
                // these pixels would be redundant - since this clip can't possibly
                // affect the pixels in this tile, skip them!
                if !world_device_rect.contains_box(&world_sub_rect) {
                    clip_list.slow_rectangles.push(ClipMaskInstanceRect {
                        common: ClipMaskInstanceCommon {
                            sub_rect: normalized_sub_rect,
                            ..*common
                        },
                        local_pos: local_clip_rect.min,
                        clip_data: ClipData::uniform(local_clip_rect.size(), 0.0, ClipMode::Clip),
                    });
                }
            }
        }

        true
    }

    /// Retrieve the correct clip batch list to append to, depending
    /// on whether this is the first clip mask for a clip task.
    fn get_batch_list(
        &mut self,
        is_first_clip: bool,
    ) -> &mut ClipBatchList {
        if is_first_clip && !self.gpu_supports_fast_clears {
            &mut self.primary_clips
        } else {
            &mut self.secondary_clips
        }
    }

    /// Add a range of clip nodes (a clip chain for one clip task) to the
    /// appropriate batch lists.
    pub fn add(
        &mut self,
        clip_node_range: ClipNodeRange,
        root_spatial_node_index: SpatialNodeIndex,
        render_tasks: &RenderTaskGraph,
        clip_store: &ClipStore,
        transforms: &mut TransformPalette,
        actual_rect: DeviceRect,
        surface_device_pixel_scale: DevicePixelScale,
        task_origin: DevicePoint,
        screen_origin: DevicePoint,
        ctx: &RenderTargetContext,
    ) -> bool {
        let mut is_first_clip = true;
        let mut clear_to_one = false;

        for i in 0 ..
clip_node_range.count { 3109 let clip_instance = clip_store.get_instance_from_range(&clip_node_range, i); 3110 let clip_node = &ctx.data_stores.clip[clip_instance.handle]; 3111 3112 let clip_transform_id = transforms.get_id( 3113 clip_node.item.spatial_node_index, 3114 ctx.root_spatial_node_index, 3115 ctx.spatial_tree, 3116 ); 3117 3118 let prim_transform_id = transforms.get_id( 3119 root_spatial_node_index, 3120 ctx.root_spatial_node_index, 3121 ctx.spatial_tree, 3122 ); 3123 3124 let common = ClipMaskInstanceCommon { 3125 sub_rect: DeviceRect::from_size(actual_rect.size()), 3126 task_origin, 3127 screen_origin, 3128 device_pixel_scale: surface_device_pixel_scale.0, 3129 clip_transform_id, 3130 prim_transform_id, 3131 }; 3132 3133 let added_clip = match clip_node.item.kind { 3134 ClipItemKind::Image { .. } => { 3135 unreachable!(); 3136 } 3137 ClipItemKind::BoxShadow { ref source } => { 3138 let task_id = source 3139 .render_task 3140 .expect("bug: render task handle not allocated"); 3141 let (uv_rect_address, texture) = render_tasks.resolve_location(task_id).unwrap(); 3142 3143 self.get_batch_list(is_first_clip) 3144 .box_shadows 3145 .entry(texture) 3146 .or_insert_with(|| ctx.frame_memory.new_vec()) 3147 .push(ClipMaskInstanceBoxShadow { 3148 common, 3149 resource_address: uv_rect_address.as_int(), 3150 shadow_data: BoxShadowData { 3151 src_rect_size: source.original_alloc_size, 3152 clip_mode: source.clip_mode as i32, 3153 stretch_mode_x: source.stretch_mode_x as i32, 3154 stretch_mode_y: source.stretch_mode_y as i32, 3155 dest_rect: source.prim_shadow_rect, 3156 }, 3157 }); 3158 3159 true 3160 } 3161 ClipItemKind::Rectangle { rect, mode: ClipMode::ClipOut } => { 3162 self.get_batch_list(is_first_clip) 3163 .slow_rectangles 3164 .push(ClipMaskInstanceRect { 3165 common, 3166 local_pos: rect.min, 3167 clip_data: ClipData::uniform(rect.size(), 0.0, ClipMode::ClipOut), 3168 }); 3169 3170 true 3171 } 3172 ClipItemKind::Rectangle { rect, mode: ClipMode::Clip } => 
{ 3173 if clip_instance.flags.contains(ClipNodeFlags::SAME_COORD_SYSTEM) { 3174 false 3175 } else { 3176 if self.add_tiled_clip_mask( 3177 actual_rect, 3178 rect, 3179 clip_node.item.spatial_node_index, 3180 ctx.spatial_tree, 3181 &ctx.screen_world_rect, 3182 ctx.global_device_pixel_scale, 3183 &common, 3184 is_first_clip, 3185 ) { 3186 clear_to_one |= is_first_clip; 3187 } else { 3188 self.get_batch_list(is_first_clip) 3189 .slow_rectangles 3190 .push(ClipMaskInstanceRect { 3191 common, 3192 local_pos: rect.min, 3193 clip_data: ClipData::uniform(rect.size(), 0.0, ClipMode::Clip), 3194 }); 3195 } 3196 3197 true 3198 } 3199 } 3200 ClipItemKind::RoundedRectangle { rect, ref radius, mode, .. } => { 3201 let batch_list = self.get_batch_list(is_first_clip); 3202 let instance = ClipMaskInstanceRect { 3203 common, 3204 local_pos: rect.min, 3205 clip_data: ClipData::rounded_rect(rect.size(), radius, mode), 3206 }; 3207 if clip_instance.flags.contains(ClipNodeFlags::USE_FAST_PATH) { 3208 batch_list.fast_rectangles.push(instance); 3209 } else { 3210 batch_list.slow_rectangles.push(instance); 3211 } 3212 3213 true 3214 } 3215 }; 3216 3217 is_first_clip &= !added_clip; 3218 } 3219 3220 clear_to_one 3221 } 3222 } 3223 3224 impl<'a, 'rc> RenderTargetContext<'a, 'rc> { 3225 /// Retrieve the GPU task address for a given clip task instance. 3226 /// Returns None if the segment was completely clipped out. 3227 /// Returns Some(OPAQUE_TASK_ADDRESS) if no clip mask is needed. 3228 /// Returns Some(task_address) if there was a valid clip mask. 
3229 fn get_clip_task_and_texture( 3230 &self, 3231 clip_task_index: ClipTaskIndex, 3232 offset: i32, 3233 render_tasks: &RenderTaskGraph, 3234 ) -> Option<(RenderTaskAddress, TextureSource)> { 3235 match self.scratch.clip_mask_instances[clip_task_index.0 as usize + offset as usize] { 3236 ClipMaskKind::Mask(task_id) => { 3237 Some(( 3238 task_id.into(), 3239 TextureSource::TextureCache( 3240 render_tasks[task_id].get_target_texture(), 3241 Swizzle::default(), 3242 ) 3243 )) 3244 } 3245 ClipMaskKind::None => { 3246 Some((OPAQUE_TASK_ADDRESS, TextureSource::Invalid)) 3247 } 3248 ClipMaskKind::Clipped => { 3249 None 3250 } 3251 } 3252 } 3253 3254 /// Helper function to get the clip task address for a 3255 /// non-segmented primitive. 3256 fn get_prim_clip_task_and_texture( 3257 &self, 3258 clip_task_index: ClipTaskIndex, 3259 render_tasks: &RenderTaskGraph, 3260 ) -> Option<(RenderTaskAddress, TextureSource)> { 3261 self.get_clip_task_and_texture( 3262 clip_task_index, 3263 0, 3264 render_tasks, 3265 ) 3266 } 3267 } 3268 3269 impl CompositorSurfaceKind { 3270 /// Returns true if the type of compositor surface needs an alpha cutout rendered 3271 fn needs_cutout(&self) -> bool { 3272 match self { 3273 CompositorSurfaceKind::Underlay => true, 3274 CompositorSurfaceKind::Overlay | CompositorSurfaceKind::Blit => false, 3275 } 3276 } 3277 }