tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

prepare.rs (78700B)


      1 /* This Source Code Form is subject to the terms of the Mozilla Public
      2 * License, v. 2.0. If a copy of the MPL was not distributed with this
      3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      4 
      5 //! # Prepare pass
      6 //!
      7 //! TODO: document this!
      8 
      9 use api::{ColorF, DebugFlags};
     10 use api::{BoxShadowClipMode, BorderStyle, ClipMode};
     11 use api::units::*;
     12 use euclid::Scale;
     13 use smallvec::SmallVec;
     14 use crate::composite::CompositorSurfaceKind;
     15 use crate::command_buffer::{CommandBufferIndex, PrimitiveCommand};
     16 use crate::image_tiling::{self, Repetition};
     17 use crate::border::{get_max_scale_for_border, build_border_instances};
     18 use crate::clip::{ClipStore, ClipNodeRange};
     19 use crate::pattern::Pattern;
     20 use crate::renderer::{GpuBufferAddress, GpuBufferBuilderF, GpuBufferWriterF, GpuBufferDataF};
     21 use crate::spatial_tree::{SpatialNodeIndex, SpatialTree};
     22 use crate::clip::{ClipDataStore, ClipNodeFlags, ClipChainInstance, ClipItemKind};
     23 use crate::frame_builder::{FrameBuildingContext, FrameBuildingState, PictureContext, PictureState};
     24 use crate::gpu_types::{BrushFlags, LinearGradientBrushData};
     25 use crate::internal_types::{FastHashMap, PlaneSplitAnchor, Filter};
     26 use crate::picture::{ClusterFlags, PictureCompositeMode, PicturePrimitive};
     27 use crate::picture::{PrimitiveList, PrimitiveCluster, SurfaceIndex, SubpixelMode, Picture3DContext};
     28 use crate::tile_cache::{SliceId, TileCacheInstance};
     29 use crate::prim_store::line_dec::MAX_LINE_DECORATION_RESOLUTION;
     30 use crate::prim_store::*;
     31 use crate::quad;
     32 use crate::prim_store::gradient::GradientGpuBlockBuilder;
     33 use crate::render_backend::DataStores;
     34 use crate::render_task_graph::RenderTaskId;
     35 use crate::render_task_cache::RenderTaskCacheKeyKind;
     36 use crate::render_task_cache::{RenderTaskCacheKey, to_cache_size, RenderTaskParent};
     37 use crate::render_task::{EmptyTask, MaskSubPass, RenderTask, RenderTaskKind, SubPass};
     38 use crate::segment::SegmentBuilder;
     39 use crate::util::{clamp_to_scale_factor, ScaleOffset};
     40 use crate::visibility::{compute_conservative_visible_rect, PrimitiveVisibility, VisibilityState};
     41 
     42 
     43 const MAX_MASK_SIZE: i32 = 4096;
     44 
     45 const MIN_BRUSH_SPLIT_AREA: f32 = 128.0 * 128.0;
     46 
     47 /// The entry point of the preapre pass.
     48 pub fn prepare_picture(
     49    pic_index: PictureIndex,
     50    store: &mut PrimitiveStore,
     51    surface_index: Option<SurfaceIndex>,
     52    subpixel_mode: SubpixelMode,
     53    frame_context: &FrameBuildingContext,
     54    frame_state: &mut FrameBuildingState,
     55    data_stores: &mut DataStores,
     56    scratch: &mut PrimitiveScratchBuffer,
     57    tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
     58    prim_instances: &mut Vec<PrimitiveInstance>,
     59 ) -> bool {
     60    if frame_state.visited_pictures[pic_index.0] {
     61        return true;
     62    }
     63 
     64    frame_state.visited_pictures[pic_index.0] = true;
     65 
     66    let pic = &mut store.pictures[pic_index.0];
     67    let Some((pic_context, mut pic_state, mut prim_list)) = pic.take_context(
     68        pic_index,
     69        surface_index,
     70        subpixel_mode,
     71        frame_state,
     72        frame_context,
     73        data_stores,
     74        scratch,
     75        tile_caches,
     76    ) else {
     77        return false;
     78    };
     79 
     80    prepare_primitives(
     81        store,
     82        &mut prim_list,
     83        &pic_context,
     84        &mut pic_state,
     85        frame_context,
     86        frame_state,
     87        data_stores,
     88        scratch,
     89        tile_caches,
     90        prim_instances,
     91    );
     92 
     93    // Restore the dependencies (borrow check dance)
     94    store.pictures[pic_context.pic_index.0].restore_context(
     95        pic_context.pic_index,
     96        prim_list,
     97        pic_context,
     98        prim_instances,
     99        frame_context,
    100        frame_state,
    101    );
    102 
    103    true
    104 }
    105 
    106 fn prepare_primitives(
    107    store: &mut PrimitiveStore,
    108    prim_list: &mut PrimitiveList,
    109    pic_context: &PictureContext,
    110    pic_state: &mut PictureState,
    111    frame_context: &FrameBuildingContext,
    112    frame_state: &mut FrameBuildingState,
    113    data_stores: &mut DataStores,
    114    scratch: &mut PrimitiveScratchBuffer,
    115    tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
    116    prim_instances: &mut Vec<PrimitiveInstance>,
    117 ) {
    118    profile_scope!("prepare_primitives");
    119    let mut cmd_buffer_targets = Vec::new();
    120 
    121    for cluster in &mut prim_list.clusters {
    122        if !cluster.flags.contains(ClusterFlags::IS_VISIBLE) {
    123            continue;
    124        }
    125        profile_scope!("cluster");
    126        pic_state.map_local_to_pic.set_target_spatial_node(
    127            cluster.spatial_node_index,
    128            frame_context.spatial_tree,
    129        );
    130 
    131        for prim_instance_index in cluster.prim_range() {
    132            if frame_state.surface_builder.get_cmd_buffer_targets_for_prim(
    133                &prim_instances[prim_instance_index].vis,
    134                &mut cmd_buffer_targets,
    135            ) {
    136                let plane_split_anchor = PlaneSplitAnchor::new(
    137                    cluster.spatial_node_index,
    138                    PrimitiveInstanceIndex(prim_instance_index as u32),
    139                );
    140 
    141                prepare_prim_for_render(
    142                    store,
    143                    prim_instance_index,
    144                    cluster,
    145                    pic_context,
    146                    pic_state,
    147                    frame_context,
    148                    frame_state,
    149                    plane_split_anchor,
    150                    data_stores,
    151                    scratch,
    152                    tile_caches,
    153                    prim_instances,
    154                    &cmd_buffer_targets,
    155                );
    156 
    157                frame_state.num_visible_primitives += 1;
    158                continue;
    159            }
    160 
    161            // TODO(gw): Technically no need to clear visibility here, since from this point it
    162            //           only matters if it got added to a command buffer. Kept here for now to
    163            //           make debugging simpler, but perhaps we can remove / tidy this up.
    164            prim_instances[prim_instance_index].clear_visibility();
    165        }
    166    }
    167 }
    168 
    169 fn can_use_clip_chain_for_quad_path(
    170    clip_chain: &ClipChainInstance,
    171    clip_store: &ClipStore,
    172    data_stores: &DataStores,
    173 ) -> bool {
    174    if !clip_chain.needs_mask {
    175        return true;
    176    }
    177 
    178    for i in 0 .. clip_chain.clips_range.count {
    179        let clip_instance = clip_store.get_instance_from_range(&clip_chain.clips_range, i);
    180        let clip_node = &data_stores.clip[clip_instance.handle];
    181 
    182        match clip_node.item.kind {
    183            ClipItemKind::RoundedRectangle { .. } | ClipItemKind::Rectangle { .. } => {}
    184            ClipItemKind::BoxShadow { .. } => {
    185                // legacy path for box-shadows for now (move them to a separate primitive next)
    186                return false;
    187            }
    188            ClipItemKind::Image { .. } => {
    189                panic!("bug: image-masks not expected on rect/quads");
    190            }
    191        }
    192    }
    193 
    194    true
    195 }
    196 
/// Prepare a single primitive instance for rendering.
///
/// If the instance is a picture, that picture is recursively prepared first,
/// since the rect of this primitive can depend on its children. For
/// non-passthrough instances this then decides whether the instance takes
/// the legacy brush path or the newer quad path, runs the legacy clip-task
/// update where required, and finally hands off to
/// `prepare_interned_prim_for_render` for the per-kind work.
fn prepare_prim_for_render(
    store: &mut PrimitiveStore,
    prim_instance_index: usize,
    cluster: &mut PrimitiveCluster,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    plane_split_anchor: PlaneSplitAnchor,
    data_stores: &mut DataStores,
    scratch: &mut PrimitiveScratchBuffer,
    tile_caches: &mut FastHashMap<SliceId, Box<TileCacheInstance>>,
    prim_instances: &mut Vec<PrimitiveInstance>,
    targets: &[CommandBufferIndex],
) {
    profile_scope!("prepare_prim_for_render");

    // If we have dependencies, we need to prepare them first, in order
    // to know the actual rect of this primitive.
    // For example, scrolling may affect the location of an item in
    // local space, which may force us to render this item on a larger
    // picture target, if being composited.
    let mut is_passthrough = false;
    if let PrimitiveInstanceKind::Picture { pic_index, .. } = prim_instances[prim_instance_index].kind {
        // Recursively prepare the child picture; bail out if it produced
        // no context (nothing to render).
        if !prepare_picture(
            pic_index,
            store,
            Some(pic_context.surface_index),
            pic_context.subpixel_mode,
            frame_context,
            frame_state,
            data_stores,
            scratch,
            tile_caches,
            prim_instances
        ) {
            return;
        }

        // A picture with no composite mode is drawn directly into its
        // parent surface (passthrough), so segmenting / clip-task work
        // below does not apply to it.
        is_passthrough = store
            .pictures[pic_index.0]
            .composite_mode
            .is_none();
    }

    let prim_instance = &mut prim_instances[prim_instance_index];

    if !is_passthrough {
        // True if the primitive rect is larger than its stretch size in
        // either axis, i.e. the pattern would have to repeat to fill it.
        fn may_need_repetition(stretch_size: LayoutSize, prim_rect: LayoutRect) -> bool {
             stretch_size.width < prim_rect.width() ||
                 stretch_size.height < prim_rect.height()
        }
        // Bug 1887841: At the moment the quad shader does not support repetitions.
        // Bug 1888349: Some primitives have brush segments that aren't handled by
        // the quad infrastructure yet.
        let disable_quad_path = match &prim_instance.kind {
            PrimitiveInstanceKind::Rectangle { .. } => false,
            PrimitiveInstanceKind::LinearGradient { data_handle, .. } => {
                let prim_data = &data_stores.linear_grad[*data_handle];
                !prim_data.brush_segments.is_empty()
                    || may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
                    || !frame_context.fb_config.precise_linear_gradients
            }
            PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
                let prim_data = &data_stores.radial_grad[*data_handle];
                !prim_data.brush_segments.is_empty()
                    || may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
            }
            // TODO(bug 1899546) Enable quad conic gradients with SWGL.
            PrimitiveInstanceKind::ConicGradient { data_handle, .. } => {
                let prim_data = &data_stores.conic_grad[*data_handle];
                !prim_data.brush_segments.is_empty()
                    || may_need_repetition(prim_data.stretch_size, prim_data.common.prim_rect)
            }
            _ => true,
        };

        // In this initial patch, we only support non-masked primitives through the new
        // quad rendering path. Follow up patches will extend this to support masks, and
        // then use by other primitives. In the new quad rendering path, we'll still want
        // to skip the entry point to `update_clip_task` as that does old-style segmenting
        // and mask generation.
        let should_update_clip_task = match &mut prim_instance.kind {
            PrimitiveInstanceKind::Rectangle { use_legacy_path, .. }
            | PrimitiveInstanceKind::RadialGradient { use_legacy_path, .. }
            | PrimitiveInstanceKind::ConicGradient { use_legacy_path, .. }
            | PrimitiveInstanceKind::LinearGradient { use_legacy_path, .. }
            => {
                // Record the chosen path on the instance; the legacy path is
                // also the one that needs the old-style clip-task update.
                *use_legacy_path = disable_quad_path || !can_use_clip_chain_for_quad_path(
                    &prim_instance.vis.clip_chain,
                    frame_state.clip_store,
                    data_stores,
                );

                *use_legacy_path
            }
            PrimitiveInstanceKind::BoxShadow { .. } |
            PrimitiveInstanceKind::Picture { .. } => false,
            _ => true,
        };

        if should_update_clip_task {
            let prim_rect = data_stores.get_local_prim_rect(
                prim_instance,
                &store.pictures,
                frame_state.surfaces,
            );

            // If the clip task update determines the primitive is fully
            // clipped out, there is nothing further to prepare.
            if !update_clip_task(
                prim_instance,
                &prim_rect.min,
                cluster.spatial_node_index,
                pic_context.raster_spatial_node_index,
                pic_context.visibility_spatial_node_index,
                pic_context,
                pic_state,
                frame_context,
                frame_state,
                store,
                data_stores,
                scratch,
            ) {
                return;
            }
        }
    }

    // Per-kind preparation (resource requests, render tasks, GPU blocks).
    prepare_interned_prim_for_render(
        store,
        PrimitiveInstanceIndex(prim_instance_index as u32),
        prim_instance,
        cluster,
        plane_split_anchor,
        pic_context,
        pic_state,
        frame_context,
        frame_state,
        data_stores,
        scratch,
        targets,
    )
}
    339 
    340 /// Prepare an interned primitive for rendering, by requesting
    341 /// resources, render tasks etc. This is equivalent to the
    342 /// prepare_prim_for_render_inner call for old style primitives.
    343 fn prepare_interned_prim_for_render(
    344    store: &mut PrimitiveStore,
    345    prim_instance_index: PrimitiveInstanceIndex,
    346    prim_instance: &mut PrimitiveInstance,
    347    cluster: &mut PrimitiveCluster,
    348    plane_split_anchor: PlaneSplitAnchor,
    349    pic_context: &PictureContext,
    350    pic_state: &mut PictureState,
    351    frame_context: &FrameBuildingContext,
    352    frame_state: &mut FrameBuildingState,
    353    data_stores: &mut DataStores,
    354    scratch: &mut PrimitiveScratchBuffer,
    355    targets: &[CommandBufferIndex],
    356 ) {
    357    let prim_spatial_node_index = cluster.spatial_node_index;
    358    let device_pixel_scale = frame_state.surfaces[pic_context.surface_index.0].device_pixel_scale;
    359 
    360    match &mut prim_instance.kind {
    361        PrimitiveInstanceKind::BoxShadow { data_handle } => {
    362            let prim_data = &mut data_stores.box_shadow[*data_handle];
    363 
    364            quad::prepare_quad(
    365                prim_data,
    366                &prim_data.kind.outer_shadow_rect,
    367                prim_instance_index,
    368                &None,
    369                prim_spatial_node_index,
    370                &prim_instance.vis.clip_chain,
    371                device_pixel_scale,
    372                frame_context,
    373                pic_context,
    374                targets,
    375                &data_stores.clip,
    376                frame_state,
    377                pic_state,
    378                scratch,
    379            );
    380 
    381            return;
    382        }
    383        PrimitiveInstanceKind::LineDecoration { data_handle, ref mut render_task, .. } => {
    384            profile_scope!("LineDecoration");
    385            let prim_data = &mut data_stores.line_decoration[*data_handle];
    386            let common_data = &mut prim_data.common;
    387            let line_dec_data = &mut prim_data.kind;
    388 
     389            // Update the template this instance references, which may refresh the GPU
    390            // cache with any shared template data.
    391            line_dec_data.update(common_data, frame_state);
    392 
    393            // Work out the device pixel size to be used to cache this line decoration.
    394 
    395            // If we have a cache key, it's a wavy / dashed / dotted line. Otherwise, it's
    396            // a simple solid line.
    397            if let Some(cache_key) = line_dec_data.cache_key.as_ref() {
    398                // TODO(gw): These scale factors don't do a great job if the world transform
    399                //           contains perspective
    400                let scale = frame_context
    401                    .spatial_tree
    402                    .get_world_transform(prim_spatial_node_index)
    403                    .scale_factors();
    404 
    405                // Scale factors are normalized to a power of 2 to reduce the number of
    406                // resolution changes.
    407                // For frames with a changing scale transform round scale factors up to
    408                // nearest power-of-2 boundary so that we don't keep having to redraw
    409                // the content as it scales up and down. Rounding up to nearest
    410                // power-of-2 boundary ensures we never scale up, only down --- avoiding
    411                // jaggies. It also ensures we never scale down by more than a factor of
    412                // 2, avoiding bad downscaling quality.
    413                let scale_width = clamp_to_scale_factor(scale.0, false);
    414                let scale_height = clamp_to_scale_factor(scale.1, false);
    415                // Pick the maximum dimension as scale
    416                let world_scale = LayoutToWorldScale::new(scale_width.max(scale_height));
    417 
    418                let scale_factor = world_scale * Scale::new(1.0);
    419                let task_size_f = (LayoutSize::from_au(cache_key.size) * scale_factor).ceil();
    420                let mut task_size = if task_size_f.width > MAX_LINE_DECORATION_RESOLUTION as f32 ||
    421                   task_size_f.height > MAX_LINE_DECORATION_RESOLUTION as f32 {
    422                     let max_extent = task_size_f.width.max(task_size_f.height);
    423                     let task_scale_factor = Scale::new(MAX_LINE_DECORATION_RESOLUTION as f32 / max_extent);
    424                     let task_size = (LayoutSize::from_au(cache_key.size) * scale_factor * task_scale_factor)
    425                                    .ceil().to_i32();
    426                    task_size
    427                } else {
    428                    task_size_f.to_i32()
    429                };
    430 
    431                // It's plausible, due to float accuracy issues that the line decoration may be considered
    432                // visible even if the scale factors are ~0. However, the render task allocation below requires
    433                // that the size of the task is > 0. To work around this, ensure that the task size is at least
    434                // 1x1 pixels
    435                task_size.width = task_size.width.max(1);
    436                task_size.height = task_size.height.max(1);
    437 
    438                // Request a pre-rendered image task.
    439                // TODO(gw): This match is a bit untidy, but it should disappear completely
    440                //           once the prepare_prims and batching are unified. When that
    441                //           happens, we can use the cache handle immediately, and not need
    442                //           to temporarily store it in the primitive instance.
    443                *render_task = Some(frame_state.resource_cache.request_render_task(
    444                    Some(RenderTaskCacheKey {
    445                        size: task_size,
    446                        kind: RenderTaskCacheKeyKind::LineDecoration(cache_key.clone()),
    447                    }),
    448                    false,
    449                    RenderTaskParent::Surface,
    450                    &mut frame_state.frame_gpu_data.f32,
    451                    frame_state.rg_builder,
    452                    &mut frame_state.surface_builder,
    453                    &mut |rg_builder, _| {
    454                        rg_builder.add().init(RenderTask::new_dynamic(
    455                            task_size,
    456                            RenderTaskKind::new_line_decoration(
    457                                cache_key.style,
    458                                cache_key.orientation,
    459                                cache_key.wavy_line_thickness.to_f32_px(),
    460                                LayoutSize::from_au(cache_key.size),
    461                            ),
    462                        ))
    463                    }
    464                ));
    465            }
    466        }
    467        PrimitiveInstanceKind::TextRun { run_index, data_handle, .. } => {
    468            profile_scope!("TextRun");
    469            let prim_data = &mut data_stores.text_run[*data_handle];
    470            let run = &mut store.text_runs[*run_index];
    471 
    472            prim_data.common.may_need_repetition = false;
    473 
    474            // The glyph transform has to match `glyph_transform` in "ps_text_run" shader.
    475            // It's relative to the rasterizing space of a glyph.
    476            let transform = frame_context.spatial_tree
    477                .get_relative_transform(
    478                    prim_spatial_node_index,
    479                    pic_context.raster_spatial_node_index,
    480                )
    481                .into_fast_transform();
    482            let prim_offset = prim_data.common.prim_rect.min.to_vector() - run.reference_frame_relative_offset;
    483 
    484            let surface = &frame_state.surfaces[pic_context.surface_index.0];
    485 
    486            // If subpixel AA is disabled due to the backing surface the glyphs
    487            // are being drawn onto, disable it (unless we are using the
    488            // specifial subpixel mode that estimates background color).
    489            let allow_subpixel = match prim_instance.vis.state {
    490                VisibilityState::Culled |
    491                VisibilityState::Unset |
    492                VisibilityState::PassThrough => {
    493                    panic!("bug: invalid visibility state");
    494                }
    495                VisibilityState::Visible { sub_slice_index, .. } => {
    496                    // For now, we only allow subpixel AA on primary sub-slices. In future we
    497                    // may support other sub-slices if we find content that does this.
    498                    if sub_slice_index.is_primary() {
    499                        match pic_context.subpixel_mode {
    500                            SubpixelMode::Allow => true,
    501                            SubpixelMode::Deny => false,
    502                            SubpixelMode::Conditional { allowed_rect, prohibited_rect } => {
    503                                // Conditional mode allows subpixel AA to be enabled for this
    504                                // text run, so long as it's inside the allowed rect.
    505                                allowed_rect.contains_box(&prim_instance.vis.clip_chain.pic_coverage_rect) &&
    506                                !prohibited_rect.intersects(&prim_instance.vis.clip_chain.pic_coverage_rect)
    507                            }
    508                        }
    509                    } else {
    510                        false
    511                    }
    512                }
    513            };
    514 
    515            run.request_resources(
    516                prim_offset,
    517                &prim_data.font,
    518                &prim_data.glyphs,
    519                &transform.to_transform().with_destination::<_>(),
    520                surface,
    521                prim_spatial_node_index,
    522                allow_subpixel,
    523                frame_context.fb_config.low_quality_pinch_zoom,
    524                frame_state.resource_cache,
    525                &mut frame_state.frame_gpu_data.f32,
    526                frame_context.spatial_tree,
    527                scratch,
    528            );
    529 
    530            prim_data.update(frame_state);
    531        }
    532        PrimitiveInstanceKind::NormalBorder { data_handle, ref mut render_task_ids, .. } => {
    533            profile_scope!("NormalBorder");
    534            let prim_data = &mut data_stores.normal_border[*data_handle];
    535            let common_data = &mut prim_data.common;
    536            let border_data = &mut prim_data.kind;
    537 
    538            common_data.may_need_repetition =
    539                matches!(border_data.border.top.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
    540                matches!(border_data.border.right.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
    541                matches!(border_data.border.bottom.style, BorderStyle::Dotted | BorderStyle::Dashed) ||
    542                matches!(border_data.border.left.style, BorderStyle::Dotted | BorderStyle::Dashed);
    543 
    544 
    545            // Update the template this instance references, which may refresh the GPU
    546            // cache with any shared template data.
    547            border_data.update(common_data, frame_state);
    548 
    549            // TODO(gw): For now, the scale factors to rasterize borders at are
    550            //           based on the true world transform of the primitive. When
    551            //           raster roots with local scale are supported in future,
    552            //           that will need to be accounted for here.
    553            let scale = frame_context
    554                .spatial_tree
    555                .get_world_transform(prim_spatial_node_index)
    556                .scale_factors();
    557 
    558            // Scale factors are normalized to a power of 2 to reduce the number of
    559            // resolution changes.
    560            // For frames with a changing scale transform round scale factors up to
    561            // nearest power-of-2 boundary so that we don't keep having to redraw
    562            // the content as it scales up and down. Rounding up to nearest
    563            // power-of-2 boundary ensures we never scale up, only down --- avoiding
    564            // jaggies. It also ensures we never scale down by more than a factor of
    565            // 2, avoiding bad downscaling quality.
    566            let scale_width = clamp_to_scale_factor(scale.0, false);
    567            let scale_height = clamp_to_scale_factor(scale.1, false);
    568            // Pick the maximum dimension as scale
    569            let world_scale = LayoutToWorldScale::new(scale_width.max(scale_height));
    570            let mut scale = world_scale * device_pixel_scale;
    571            let max_scale = get_max_scale_for_border(border_data);
    572            scale.0 = scale.0.min(max_scale.0);
    573 
    574            // For each edge and corner, request the render task by content key
    575            // from the render task cache. This ensures that the render task for
    576            // this segment will be available for batching later in the frame.
    577            let mut handles: SmallVec<[RenderTaskId; 8]> = SmallVec::new();
    578 
    579            for segment in &border_data.border_segments {
    580                // Update the cache key device size based on requested scale.
    581                let cache_size = to_cache_size(segment.local_task_size, &mut scale);
    582                let cache_key = RenderTaskCacheKey {
    583                    kind: RenderTaskCacheKeyKind::BorderSegment(segment.cache_key.clone()),
    584                    size: cache_size,
    585                };
    586 
    587                handles.push(frame_state.resource_cache.request_render_task(
    588                    Some(cache_key),
    589                    false,          // TODO(gw): We don't calculate opacity for borders yet!
    590                    RenderTaskParent::Surface,
    591                    &mut frame_state.frame_gpu_data.f32,
    592                    frame_state.rg_builder,
    593                    &mut frame_state.surface_builder,
    594                    &mut |rg_builder, _| {
    595                        rg_builder.add().init(RenderTask::new_dynamic(
    596                            cache_size,
    597                            RenderTaskKind::new_border_segment(
    598                                build_border_instances(
    599                                    &segment.cache_key,
    600                                    cache_size,
    601                                    &border_data.border,
    602                                    scale,
    603                                )
    604                            ),
    605                        ))
    606                    }
    607                ));
    608            }
    609 
    610            *render_task_ids = scratch
    611                .border_cache_handles
    612                .extend(handles);
    613        }
    614        PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
    615            profile_scope!("ImageBorder");
    616            let prim_data = &mut data_stores.image_border[*data_handle];
    617 
    618            // TODO: get access to the ninepatch and to check whether we need support
    619            // for repetitions in the shader.
    620 
    621            // Update the template this instance references, which may refresh the GPU
    622            // cache with any shared template data.
    623            prim_data.kind.update(
    624                &mut prim_data.common,
    625                frame_state
    626            );
    627        }
    628        PrimitiveInstanceKind::Rectangle { data_handle, segment_instance_index, use_legacy_path, .. } => {
    629            profile_scope!("Rectangle");
    630 
    631            if *use_legacy_path {
    632                let prim_data = &mut data_stores.prim[*data_handle];
    633                prim_data.common.may_need_repetition = false;
    634 
     635                // Update the template this instance references, which may refresh the GPU
    636                // cache with any shared template data.
    637                prim_data.update(
    638                    frame_state,
    639                    frame_context.scene_properties,
    640                );
    641 
    642                write_segment(
    643                    *segment_instance_index,
    644                    frame_state,
    645                    &mut scratch.segments,
    646                    &mut scratch.segment_instances,
    647                    |request| {
    648                        prim_data.kind.write_prim_gpu_blocks(
    649                            request,
    650                            frame_context.scene_properties,
    651                        );
    652                    }
    653                );
    654            } else {
    655                let prim_data = &data_stores.prim[*data_handle];
    656 
    657                quad::prepare_quad(
    658                    prim_data,
    659                    &prim_data.common.prim_rect,
    660                    prim_instance_index,
    661                    &None,
    662                    prim_spatial_node_index,
    663                    &prim_instance.vis.clip_chain,
    664                    device_pixel_scale,
    665                    frame_context,
    666                    pic_context,
    667                    targets,
    668                    &data_stores.clip,
    669                    frame_state,
    670                    pic_state,
    671                    scratch,
    672                );
    673 
    674                return;
    675            }
    676        }
    677        PrimitiveInstanceKind::YuvImage { data_handle, segment_instance_index, compositor_surface_kind, .. } => {
    678            profile_scope!("YuvImage");
    679            let prim_data = &mut data_stores.yuv_image[*data_handle];
    680            let common_data = &mut prim_data.common;
    681            let yuv_image_data = &mut prim_data.kind;
    682 
    683            common_data.may_need_repetition = false;
    684 
    685            // Update the template this instance references, which may refresh the GPU
    686            // cache with any shared template data.
    687            yuv_image_data.update(
    688                common_data,
    689                compositor_surface_kind.is_composited(),
    690                frame_state,
    691            );
    692 
    693            write_segment(
    694                *segment_instance_index,
    695                frame_state,
    696                &mut scratch.segments,
    697                &mut scratch.segment_instances,
    698                |writer| {
    699                    yuv_image_data.write_prim_gpu_blocks(writer);
    700                }
    701            );
    702        }
    703        PrimitiveInstanceKind::Image { data_handle, image_instance_index, .. } => {
    704            profile_scope!("Image");
    705 
    706            let prim_data = &mut data_stores.image[*data_handle];
    707            let common_data = &mut prim_data.common;
    708            let image_data = &mut prim_data.kind;
    709            let image_instance = &mut store.images[*image_instance_index];
    710 
    711            // Update the template this instance references, which may refresh the GPU
    712            // cache with any shared template data.
    713            image_data.update(
    714                common_data,
    715                image_instance,
    716                prim_spatial_node_index,
    717                frame_state,
    718                frame_context,
    719                &mut prim_instance.vis,
    720            );
    721 
    722            write_segment(
    723                image_instance.segment_instance_index,
    724                frame_state,
    725                &mut scratch.segments,
    726                &mut scratch.segment_instances,
    727                |request| {
    728                    image_data.write_prim_gpu_blocks(&image_instance.adjustment, request);
    729                },
    730            );
    731        }
    732        PrimitiveInstanceKind::LinearGradient { data_handle, ref mut visible_tiles_range, use_legacy_path, .. } => {
    733            profile_scope!("LinearGradient");
    734            let prim_data = &mut data_stores.linear_grad[*data_handle];
    735            if !*use_legacy_path {
    736                quad::prepare_repeatable_quad(
    737                    prim_data,
    738                    &prim_data.common.prim_rect,
    739                    prim_data.stretch_size,
    740                    prim_data.tile_spacing,
    741                    prim_instance_index,
    742                    &None,
    743                    prim_spatial_node_index,
    744                    &prim_instance.vis.clip_chain,
    745                    device_pixel_scale,
    746                    frame_context,
    747                    pic_context,
    748                    targets,
    749                    &data_stores.clip,
    750                    frame_state,
    751                    pic_state,
    752                    scratch,
    753                );
    754 
    755                return;
    756            }
    757 
    758            // Update the template this instance references, which may refresh the GPU
    759            // cache with any shared template data.
    760            prim_data.update(frame_state);
    761 
    762            if prim_data.stretch_size.width >= prim_data.common.prim_rect.width() &&
    763                prim_data.stretch_size.height >= prim_data.common.prim_rect.height() {
    764 
    765                prim_data.common.may_need_repetition = false;
    766            }
    767 
    768            if prim_data.tile_spacing != LayoutSize::zero() {
    769                // We are performing the decomposition on the CPU here, no need to
    770                // have it in the shader.
    771                prim_data.common.may_need_repetition = false;
    772 
    773                *visible_tiles_range = decompose_repeated_gradient(
    774                    &prim_instance.vis,
    775                    &prim_data.common.prim_rect,
    776                    prim_spatial_node_index,
    777                    &prim_data.stretch_size,
    778                    &prim_data.tile_spacing,
    779                    frame_state,
    780                    &mut scratch.gradient_tiles,
    781                    &frame_context.spatial_tree,
    782                    Some(&mut |_, gpu_buffer| {
    783                        let mut writer = gpu_buffer.write_blocks(LinearGradientBrushData::NUM_BLOCKS);
    784                        writer.push(&LinearGradientBrushData {
    785                            start: prim_data.start_point,
    786                            end: prim_data.end_point,
    787                            extend_mode: prim_data.extend_mode,
    788                            stretch_size: prim_data.stretch_size,
    789                        });
    790                        writer.finish()
    791                    }),
    792                );
    793 
    794                if visible_tiles_range.is_empty() {
    795                    prim_instance.clear_visibility();
    796                }
    797            }
    798 
    799            let stops_address = GradientGpuBlockBuilder::build(
    800                prim_data.reverse_stops,
    801                &mut frame_state.frame_gpu_data.f32,
    802                &prim_data.stops,
    803            );
    804 
    805            // TODO(gw): Consider whether it's worth doing segment building
    806            //           for gradient primitives.
    807            frame_state.push_prim(
    808                &PrimitiveCommand::instance(prim_instance_index, stops_address),
    809                prim_spatial_node_index,
    810                targets,
    811            );
    812            return;
    813        }
    814        PrimitiveInstanceKind::CachedLinearGradient { data_handle, ref mut visible_tiles_range, .. } => {
    815            profile_scope!("CachedLinearGradient");
    816            let prim_data = &mut data_stores.linear_grad[*data_handle];
    817            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
    818                || prim_data.stretch_size.height < prim_data.common.prim_rect.height();
    819 
    820            // Update the template this instance references, which may refresh the GPU
    821            // cache with any shared template data.
    822            prim_data.update(frame_state);
    823 
    824            if prim_data.tile_spacing != LayoutSize::zero() {
    825                prim_data.common.may_need_repetition = false;
    826 
    827                *visible_tiles_range = decompose_repeated_gradient(
    828                    &prim_instance.vis,
    829                    &prim_data.common.prim_rect,
    830                    prim_spatial_node_index,
    831                    &prim_data.stretch_size,
    832                    &prim_data.tile_spacing,
    833                    frame_state,
    834                    &mut scratch.gradient_tiles,
    835                    &frame_context.spatial_tree,
    836                    None,
    837                );
    838 
    839                if visible_tiles_range.is_empty() {
    840                    prim_instance.clear_visibility();
    841                }
    842            }
    843        }
    844        PrimitiveInstanceKind::RadialGradient { data_handle, ref mut visible_tiles_range, use_legacy_path, .. } => {
    845            profile_scope!("RadialGradient");
    846            let prim_data = &mut data_stores.radial_grad[*data_handle];
    847 
    848            if !*use_legacy_path {
    849                quad::prepare_repeatable_quad(
    850                    prim_data,
    851                    &prim_data.common.prim_rect,
    852                    prim_data.stretch_size,
    853                    prim_data.tile_spacing,
    854                    prim_instance_index,
    855                    &None,
    856                    prim_spatial_node_index,
    857                    &prim_instance.vis.clip_chain,
    858                    device_pixel_scale,
    859                    frame_context,
    860                    pic_context,
    861                    targets,
    862                    &data_stores.clip,
    863                    frame_state,
    864                    pic_state,
    865                    scratch,
    866                );
    867 
    868                return;
    869            }
    870 
    871            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
    872            || prim_data.stretch_size.height < prim_data.common.prim_rect.height();
    873 
    874            // Update the template this instance references, which may refresh the GPU
    875            // cache with any shared template data.
    876            prim_data.update(frame_state);
    877 
    878            if prim_data.tile_spacing != LayoutSize::zero() {
    879                prim_data.common.may_need_repetition = false;
    880 
    881                *visible_tiles_range = decompose_repeated_gradient(
    882                    &prim_instance.vis,
    883                    &prim_data.common.prim_rect,
    884                    prim_spatial_node_index,
    885                    &prim_data.stretch_size,
    886                    &prim_data.tile_spacing,
    887                    frame_state,
    888                    &mut scratch.gradient_tiles,
    889                    &frame_context.spatial_tree,
    890                    None,
    891                );
    892 
    893                if visible_tiles_range.is_empty() {
    894                    prim_instance.clear_visibility();
    895                }
    896            }
    897        }
    898        PrimitiveInstanceKind::ConicGradient { data_handle, ref mut visible_tiles_range, use_legacy_path, .. } => {
    899            profile_scope!("ConicGradient");
    900            let prim_data = &mut data_stores.conic_grad[*data_handle];
    901 
    902            if !*use_legacy_path {
    903                // Conic gradients are quite slow with SWGL, so we want to cache
    904                // them as much as we can, even large ones.
    905                // TODO: get_surface_rect is not always cheap. We should reorganize
    906                // the code so that we only call it as much as we really need it,
    907                // while avoiding this much boilerplate for each primitive that uses
    908                // caching.
    909                let mut should_cache = frame_context.fb_config.is_software;
    910                if should_cache {
    911                    let surface = &frame_state.surfaces[pic_context.surface_index.0];
    912                    let clipped_surface_rect = surface.get_surface_rect(
    913                        &prim_instance.vis.clip_chain.pic_coverage_rect,
    914                        frame_context.spatial_tree,
    915                    );
    916 
    917                    should_cache = if let Some(rect) = clipped_surface_rect {
    918                        rect.width() < 4096 && rect.height() < 4096
    919                    } else {
    920                        false
    921                    };
    922                }
    923 
    924                let cache_key = if should_cache {
    925                    quad::cache_key(
    926                        data_handle.uid(),
    927                        prim_spatial_node_index,
    928                        frame_context.spatial_tree,
    929                        &prim_instance.vis.clip_chain,
    930                        frame_state.clip_store,
    931                        &data_stores.clip,
    932                    )
    933                } else {
    934                    None
    935                };
    936 
    937                quad::prepare_repeatable_quad(
    938                    prim_data,
    939                    &prim_data.common.prim_rect,
    940                    prim_data.stretch_size,
    941                    prim_data.tile_spacing,
    942                    prim_instance_index,
    943                    &cache_key,
    944                    prim_spatial_node_index,
    945                    &prim_instance.vis.clip_chain,
    946                    device_pixel_scale,
    947                    frame_context,
    948                    pic_context,
    949                    targets,
    950                    &data_stores.clip,
    951                    frame_state,
    952                    pic_state,
    953                    scratch,
    954                );
    955 
    956                return;
    957            }
    958 
    959            prim_data.common.may_need_repetition = prim_data.stretch_size.width < prim_data.common.prim_rect.width()
    960                || prim_data.stretch_size.height < prim_data.common.prim_rect.height();
    961 
    962            // Update the template this instance references, which may refresh the GPU
    963            // cache with any shared template data.
    964            prim_data.update(frame_state);
    965 
    966            if prim_data.tile_spacing != LayoutSize::zero() {
    967                prim_data.common.may_need_repetition = false;
    968 
    969                *visible_tiles_range = decompose_repeated_gradient(
    970                    &prim_instance.vis,
    971                    &prim_data.common.prim_rect,
    972                    prim_spatial_node_index,
    973                    &prim_data.stretch_size,
    974                    &prim_data.tile_spacing,
    975                    frame_state,
    976                    &mut scratch.gradient_tiles,
    977                    &frame_context.spatial_tree,
    978                    None,
    979                );
    980 
    981                if visible_tiles_range.is_empty() {
    982                    prim_instance.clear_visibility();
    983                }
    984            }
    985 
    986            // TODO(gw): Consider whether it's worth doing segment building
    987            //           for gradient primitives.
    988        }
    989        PrimitiveInstanceKind::Picture { pic_index, .. } => {
    990            profile_scope!("Picture");
    991            let pic = &mut store.pictures[pic_index.0];
    992 
    993            if prim_instance.vis.clip_chain.needs_mask {
    994                // TODO(gw): Much of the code in this branch could be moved in to a common
    995                //           function as we move more primitives to the new clip-mask paths.
    996 
    997                // We are going to split the clip mask tasks in to a list to be rendered
    998                // on the source picture, and those to be rendered in to a mask for
    999                // compositing the picture in to the target.
   1000                let mut source_masks = Vec::new();
   1001                let mut target_masks = Vec::new();
   1002 
   1003                // For some composite modes, we force target mask due to limitations. That
    1004                // might result in artifacts for these modes (which are already an existing
   1005                // problem) but we can handle these cases as follow ups.
   1006                let force_target_mask = match pic.composite_mode {
   1007                    // We can't currently render over top of these filters as their size
   1008                    // may have changed due to downscaling. We could handle this separate
   1009                    // case as a follow up.
   1010                    Some(PictureCompositeMode::Filter(Filter::Blur { .. })) |
   1011                    Some(PictureCompositeMode::Filter(Filter::DropShadows { .. })) |
   1012                    Some(PictureCompositeMode::SVGFEGraph( .. )) => {
   1013                        true
   1014                    }
   1015                    _ => {
   1016                        false
   1017                    }
   1018                };
   1019 
   1020                // Work out which clips get drawn in to the source / target mask
   1021                for i in 0 .. prim_instance.vis.clip_chain.clips_range.count {
   1022                    let clip_instance = frame_state.clip_store.get_instance_from_range(&prim_instance.vis.clip_chain.clips_range, i);
   1023 
   1024                    if !force_target_mask && clip_instance.flags.contains(ClipNodeFlags::SAME_COORD_SYSTEM) {
   1025                        source_masks.push(i);
   1026                    } else {
   1027                        target_masks.push(i);
   1028                    }
   1029                }
   1030 
   1031                let pic_surface_index = pic.raster_config.as_ref().unwrap().surface_index;
   1032                let prim_local_rect: LayoutRect = frame_state
   1033                    .surfaces[pic_surface_index.0]
   1034                    .clipped_local_rect
   1035                    .cast_unit();
   1036 
   1037                let pattern = Pattern::color(ColorF::WHITE);
   1038 
   1039                let prim_address_f = quad::write_prim_blocks(
   1040                    &mut frame_state.frame_gpu_data.f32,
   1041                    prim_local_rect.to_untyped(),
   1042                    prim_instance.vis.clip_chain.local_clip_rect.to_untyped(),
   1043                    pattern.base_color,
   1044                    pattern.texture_input.task_id,
   1045                    &[],
   1046                    ScaleOffset::identity(),
   1047                );
   1048 
   1049                // Handle masks on the source. This is the common case, and occurs for:
   1050                // (a) Any masks in the same coord space as the surface
   1051                // (b) All masks if the surface and parent are axis-aligned
   1052                if !source_masks.is_empty() {
   1053                    let first_clip_node_index = frame_state.clip_store.clip_node_instances.len() as u32;
   1054                    let parent_task_id = pic.primary_render_task_id.expect("bug: no composite mode");
   1055 
   1056                    // Construct a new clip node range, also add image-mask dependencies as needed
   1057                    for instance in source_masks {
   1058                        let clip_instance = frame_state.clip_store.get_instance_from_range(&prim_instance.vis.clip_chain.clips_range, instance);
   1059 
   1060                        for tile in frame_state.clip_store.visible_mask_tiles(clip_instance) {
   1061                            frame_state.rg_builder.add_dependency(
   1062                                parent_task_id,
   1063                                tile.task_id,
   1064                            );
   1065                        }
   1066 
   1067                        frame_state.clip_store.clip_node_instances.push(clip_instance.clone());
   1068                    }
   1069 
   1070                    let clip_node_range = ClipNodeRange {
   1071                        first: first_clip_node_index,
   1072                        count: frame_state.clip_store.clip_node_instances.len() as u32 - first_clip_node_index,
   1073                    };
   1074 
   1075                    let masks = MaskSubPass {
   1076                        clip_node_range,
   1077                        prim_spatial_node_index,
   1078                        prim_address_f,
   1079                    };
   1080 
   1081                    // Add the mask as a sub-pass of the picture
   1082                    let pic_task_id = pic.primary_render_task_id.expect("uh oh");
   1083                    let pic_task = frame_state.rg_builder.get_task_mut(pic_task_id);
   1084                    pic_task.add_sub_pass(SubPass::Masks {
   1085                        masks,
   1086                    });
   1087                }
   1088 
   1089                // Handle masks on the target. This is the rare case, and occurs for:
   1090                // Masks in parent space when non-axis-aligned to source space
   1091                if !target_masks.is_empty() {
   1092                    let surface = &frame_state.surfaces[pic_context.surface_index.0];
   1093                    let coverage_rect = prim_instance.vis.clip_chain.pic_coverage_rect;
   1094 
   1095                    let device_pixel_scale = surface.device_pixel_scale;
   1096                    let raster_spatial_node_index = surface.raster_spatial_node_index;
   1097 
   1098                    let Some(clipped_surface_rect) = surface.get_surface_rect(
   1099                        &coverage_rect,
   1100                        frame_context.spatial_tree,
   1101                    ) else {
   1102                        return;
   1103                    };
   1104 
    1105                    // Draw a normal screen-space mask to an alpha target that
   1106                    // can be sampled when compositing this picture.
   1107                    let empty_task = EmptyTask {
   1108                        content_origin: clipped_surface_rect.min.to_f32(),
   1109                        device_pixel_scale,
   1110                        raster_spatial_node_index,
   1111                    };
   1112 
   1113                    let task_size = clipped_surface_rect.size();
   1114 
   1115                    let clip_task_id = frame_state.rg_builder.add().init(RenderTask::new_dynamic(
   1116                        task_size,
   1117                        RenderTaskKind::Empty(empty_task),
   1118                    ));
   1119 
   1120                    // Construct a new clip node range, also add image-mask dependencies as needed
   1121                    let first_clip_node_index = frame_state.clip_store.clip_node_instances.len() as u32;
   1122                    for instance in target_masks {
   1123                        let clip_instance = frame_state.clip_store.get_instance_from_range(&prim_instance.vis.clip_chain.clips_range, instance);
   1124 
   1125                        for tile in frame_state.clip_store.visible_mask_tiles(clip_instance) {
   1126                            frame_state.rg_builder.add_dependency(
   1127                                clip_task_id,
   1128                                tile.task_id,
   1129                            );
   1130                        }
   1131 
   1132                        frame_state.clip_store.clip_node_instances.push(clip_instance.clone());
   1133                    }
   1134 
   1135                    let clip_node_range = ClipNodeRange {
   1136                        first: first_clip_node_index,
   1137                        count: frame_state.clip_store.clip_node_instances.len() as u32 - first_clip_node_index,
   1138                    };
   1139 
   1140                    let masks = MaskSubPass {
   1141                        clip_node_range,
   1142                        prim_spatial_node_index,
   1143                        prim_address_f,
   1144                    };
   1145 
   1146                    let clip_task = frame_state.rg_builder.get_task_mut(clip_task_id);
   1147                    clip_task.add_sub_pass(SubPass::Masks {
   1148                        masks,
   1149                    });
   1150 
   1151                    let clip_task_index = ClipTaskIndex(scratch.clip_mask_instances.len() as _);
   1152                    scratch.clip_mask_instances.push(ClipMaskKind::Mask(clip_task_id));
   1153                    prim_instance.vis.clip_task_index = clip_task_index;
   1154                    frame_state.surface_builder.add_child_render_task(
   1155                        clip_task_id,
   1156                        frame_state.rg_builder,
   1157                    );
   1158                }
   1159            }
   1160 
   1161            pic.write_gpu_blocks(
   1162                frame_state,
   1163                data_stores,
   1164            );
   1165 
   1166            if let Picture3DContext::In { root_data: None, plane_splitter_index, .. } = pic.context_3d {
   1167                let dirty_rect = frame_state.current_dirty_region().combined;
   1168                let visibility_node = frame_state.current_dirty_region().visibility_spatial_node;
   1169                let splitter = &mut frame_state.plane_splitters[plane_splitter_index.0];
   1170                let surface_index = pic.raster_config.as_ref().unwrap().surface_index;
   1171                let surface = &frame_state.surfaces[surface_index.0];
   1172                let local_prim_rect = surface.clipped_local_rect.cast_unit();
   1173 
   1174                PicturePrimitive::add_split_plane(
   1175                    splitter,
   1176                    frame_context.spatial_tree,
   1177                    prim_spatial_node_index,
   1178                    visibility_node,
   1179                    local_prim_rect,
   1180                    &prim_instance.vis.clip_chain.local_clip_rect,
   1181                    dirty_rect,
   1182                    plane_split_anchor,
   1183                );
   1184            }
   1185        }
   1186        PrimitiveInstanceKind::BackdropCapture { .. } => {
   1187            // Register the owner picture of this backdrop primitive as the
   1188            // target for resolve of the sub-graph
   1189            frame_state.surface_builder.register_resolve_source();
   1190 
   1191            if frame_context.debug_flags.contains(DebugFlags::HIGHLIGHT_BACKDROP_FILTERS) {
   1192                if let Some(world_rect) = pic_state.map_pic_to_vis.map(&prim_instance.vis.clip_chain.pic_coverage_rect) {
   1193                    scratch.push_debug_rect(
   1194                        world_rect.cast_unit(),
   1195                        2,
   1196                        crate::debug_colors::MAGENTA,
   1197                        ColorF::TRANSPARENT,
   1198                    );
   1199                }
   1200            }
   1201        }
   1202        PrimitiveInstanceKind::BackdropRender { pic_index, .. } => {
   1203            match frame_state.surface_builder.sub_graph_output_map.get(pic_index).cloned() {
   1204                Some(sub_graph_output_id) => {
   1205                    frame_state.surface_builder.add_child_render_task(
   1206                        sub_graph_output_id,
   1207                        frame_state.rg_builder,
   1208                    );
   1209                }
   1210                None => {
   1211                    // Backdrop capture was found not visible, didn't produce a sub-graph
   1212                    // so we can just skip drawing
   1213                    prim_instance.clear_visibility();
   1214                }
   1215            }
   1216        }
   1217    }
   1218 
   1219    match prim_instance.vis.state {
   1220        VisibilityState::Unset => {
   1221            panic!("bug: invalid vis state");
   1222        }
   1223        VisibilityState::Visible { .. } => {
   1224            frame_state.push_prim(
   1225                &PrimitiveCommand::simple(prim_instance_index),
   1226                prim_spatial_node_index,
   1227                targets,
   1228            );
   1229        }
   1230        VisibilityState::PassThrough | VisibilityState::Culled => {}
   1231    }
   1232 }
   1233 
   1234 
   1235 fn write_segment<F>(
   1236    segment_instance_index: SegmentInstanceIndex,
   1237    frame_state: &mut FrameBuildingState,
   1238    segments: &mut SegmentStorage,
   1239    segment_instances: &mut SegmentInstanceStorage,
   1240    f: F,
   1241 ) where F: Fn(&mut GpuBufferWriterF) {
   1242    debug_assert_ne!(segment_instance_index, SegmentInstanceIndex::INVALID);
   1243    if segment_instance_index != SegmentInstanceIndex::UNUSED {
   1244        let segment_instance = &mut segment_instances[segment_instance_index];
   1245 
   1246        let segments = &segments[segment_instance.segments_range];
   1247        let mut writer = frame_state.frame_gpu_data.f32.write_blocks(3 + segments.len() * VECS_PER_SEGMENT);
   1248 
   1249        f(&mut writer);
   1250 
   1251        for segment in segments {
   1252            segment.write_gpu_blocks(&mut writer);
   1253        }
   1254 
   1255        segment_instance.gpu_data = writer.finish();
   1256    }
   1257 }
   1258 
/// Decompose a repeated gradient primitive into per-tile entries, appending
/// one `VisibleGradientTile` per visible repetition to `gradient_tiles`.
///
/// Returns the (possibly empty) range of tiles pushed for this primitive.
/// The range is empty when the primitive's local clip rect does not
/// intersect its local rect, or when no repetitions fall inside the
/// conservative visible rect.
fn decompose_repeated_gradient(
    prim_vis: &PrimitiveVisibility,
    prim_local_rect: &LayoutRect,
    prim_spatial_node_index: SpatialNodeIndex,
    stretch_size: &LayoutSize,
    tile_spacing: &LayoutSize,
    frame_state: &mut FrameBuildingState,
    gradient_tiles: &mut GradientTileStorage,
    spatial_tree: &SpatialTree,
    // Optional per-tile hook that writes the tile's GPU data and returns its
    // buffer address; when absent the tile address stays INVALID.
    mut callback: Option<&mut dyn FnMut(&LayoutRect, &mut GpuBufferBuilderF) -> GpuBufferAddress>,
) -> GradientTileRange {
    // Open a range now; every tile pushed below (until close_range) belongs
    // to this primitive's tile set.
    let tile_range = gradient_tiles.open_range();

    // Tighten the clip rect because decomposing the repeated image can
    // produce primitives that are partially covering the original image
    // rect and we want to clip these extra parts out.
    if let Some(tight_clip_rect) = prim_vis
        .clip_chain
        .local_clip_rect
        .intersection(prim_local_rect) {

        // Restrict tiling to the (conservatively estimated) part of the
        // primitive that can actually be visible this frame.
        let visible_rect = compute_conservative_visible_rect(
            &prim_vis.clip_chain,
            frame_state.current_dirty_region().combined,
            frame_state.current_dirty_region().visibility_spatial_node,
            prim_spatial_node_index,
            spatial_tree,
        );
        // Distance between repetition origins: tile size plus inter-tile gap.
        let stride = *stretch_size + *tile_spacing;

        let repetitions = image_tiling::repetitions(prim_local_rect, &visible_rect, stride);
        // Reserve up front so the push loop below doesn't reallocate.
        gradient_tiles.reserve(repetitions.num_repetitions());
        for Repetition { origin, .. } in repetitions {
            let rect = LayoutRect::from_origin_and_size(
                origin,
                *stretch_size,
            );

            let mut address = GpuBufferAddress::INVALID;

            if let Some(callback) = &mut callback {
                address = callback(&rect, &mut frame_state.frame_gpu_data.f32);
            }

            gradient_tiles.push(VisibleGradientTile {
                local_rect: rect,
                local_clip_rect: tight_clip_rect,
                address,
            });
        }
    }

    // At this point if we don't have tiles to show it means we could probably
    // have done a better job at culling during an earlier stage.
    gradient_tiles.close_range(tile_range)
}
   1315 
   1316 
/// Try to build per-segment clip masks for a brush primitive.
///
/// Returns `Some(clip_task_index)` pointing at the first entry pushed into
/// `clip_mask_instances` (one `ClipMaskKind` per segment) when the primitive
/// has brush segments, or `None` when this primitive kind never segments or
/// produced no segments — in which case the caller falls back to a single,
/// unsegmented clip mask (see `update_clip_task`).
fn update_clip_task_for_brush(
    instance: &PrimitiveInstance,
    prim_origin: &LayoutPoint,
    prim_spatial_node_index: SpatialNodeIndex,
    root_spatial_node_index: SpatialNodeIndex,
    visibility_spatial_node_index: SpatialNodeIndex,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    prim_store: &PrimitiveStore,
    data_stores: &mut DataStores,
    segments_store: &mut SegmentStorage,
    segment_instances_store: &mut SegmentInstanceStorage,
    clip_mask_instances: &mut Vec<ClipMaskKind>,
    device_pixel_scale: DevicePixelScale,
) -> Option<ClipTaskIndex> {
    // Resolve the segment slice for this primitive kind. Kinds that never
    // take the segmented path return None immediately.
    let segments = match instance.kind {
        PrimitiveInstanceKind::BoxShadow { .. } => {
            unreachable!("BUG: box-shadows should not hit legacy brush clip path");
        }
        PrimitiveInstanceKind::Picture { .. } |
        PrimitiveInstanceKind::TextRun { .. } |
        PrimitiveInstanceKind::LineDecoration { .. } |
        PrimitiveInstanceKind::BackdropCapture { .. } |
        PrimitiveInstanceKind::BackdropRender { .. } => {
            return None;
        }
        PrimitiveInstanceKind::Image { image_instance_index, .. } => {
            // Images store their segment instance on the image instance
            // rather than directly on the primitive instance.
            let segment_instance_index = prim_store
                .images[image_instance_index]
                .segment_instance_index;

            if segment_instance_index == SegmentInstanceIndex::UNUSED {
                return None;
            }

            let segment_instance = &segment_instances_store[segment_instance_index];

            &segments_store[segment_instance.segments_range]
        }
        PrimitiveInstanceKind::YuvImage { segment_instance_index, .. } => {
            // INVALID means build_segments_if_needed was never run for this
            // primitive — that would be a frame-building ordering bug.
            debug_assert!(segment_instance_index != SegmentInstanceIndex::INVALID);

            if segment_instance_index == SegmentInstanceIndex::UNUSED {
                return None;
            }

            let segment_instance = &segment_instances_store[segment_instance_index];

            &segments_store[segment_instance.segments_range]
        }
        PrimitiveInstanceKind::Rectangle { use_legacy_path, segment_instance_index, .. } => {
            // Only legacy-path rectangles reach this function.
            assert!(use_legacy_path);
            debug_assert!(segment_instance_index != SegmentInstanceIndex::INVALID);

            if segment_instance_index == SegmentInstanceIndex::UNUSED {
                return None;
            }

            let segment_instance = &segment_instances_store[segment_instance_index];

            &segments_store[segment_instance.segments_range]
        }
        PrimitiveInstanceKind::ImageBorder { data_handle, .. } => {
            let border_data = &data_stores.image_border[data_handle].kind;

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            border_data.brush_segments.as_slice()
        }
        PrimitiveInstanceKind::NormalBorder { data_handle, .. } => {
            let border_data = &data_stores.normal_border[data_handle].kind;

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            border_data.brush_segments.as_slice()
        }
        PrimitiveInstanceKind::LinearGradient { data_handle, .. }
        | PrimitiveInstanceKind::CachedLinearGradient { data_handle, .. } => {
            let prim_data = &data_stores.linear_grad[data_handle];

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            if prim_data.brush_segments.is_empty() {
                return None;
            }

            prim_data.brush_segments.as_slice()
        }
        PrimitiveInstanceKind::RadialGradient { data_handle, .. } => {
            let prim_data = &data_stores.radial_grad[data_handle];

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            if prim_data.brush_segments.is_empty() {
                return None;
            }

            prim_data.brush_segments.as_slice()
        }
        PrimitiveInstanceKind::ConicGradient { data_handle, .. } => {
            let prim_data = &data_stores.conic_grad[data_handle];

            // TODO: This is quite messy - once we remove legacy primitives we
            //       can change this to be a tuple match on (instance, template)
            if prim_data.brush_segments.is_empty() {
                return None;
            }

            prim_data.brush_segments.as_slice()
        }
    };

    // If there are no segments, early out to avoid setting a valid
    // clip task instance location below.
    if segments.is_empty() {
        return None;
    }

    // Set where in the clip mask instances array the clip mask info
    // can be found for this primitive. Each segment will push the
    // clip mask information for itself in update_clip_task below.
    let clip_task_index = ClipTaskIndex(clip_mask_instances.len() as _);

    // If we only built 1 segment, there is no point in re-running
    // the clip chain builder. Instead, just use the clip chain
    // instance that was built for the main primitive. This is a
    // significant optimization for the common case.
    if segments.len() == 1 {
        let clip_mask_kind = update_brush_segment_clip_task(
            &segments[0],
            Some(&instance.vis.clip_chain),
            root_spatial_node_index,
            pic_context.surface_index,
            frame_context,
            frame_state,
            &mut data_stores.clip,
            device_pixel_scale,
        );
        clip_mask_instances.push(clip_mask_kind);
    } else {
        let dirty_rect = frame_state.current_dirty_region().combined;

        for segment in segments {
            // Build a clip chain for the smaller segment rect. This will
            // often manage to eliminate most/all clips, and sometimes
            // clip the segment completely.
            frame_state.clip_store.set_active_clips_from_clip_chain(
                &instance.vis.clip_chain,
                prim_spatial_node_index,
                visibility_spatial_node_index,
                &frame_context.spatial_tree,
                &data_stores.clip,
            );

            // Segment rects are stored relative to the primitive origin,
            // so translate back into the primitive's local space first.
            let segment_clip_chain = frame_state
                .clip_store
                .build_clip_chain_instance(
                    segment.local_rect.translate(prim_origin.to_vector()),
                    &pic_state.map_local_to_pic,
                    &pic_state.map_pic_to_vis,
                    &frame_context.spatial_tree,
                    &mut frame_state.frame_gpu_data.f32,
                    frame_state.resource_cache,
                    device_pixel_scale,
                    &dirty_rect,
                    &mut data_stores.clip,
                    frame_state.rg_builder,
                    false,
                );

            let clip_mask_kind = update_brush_segment_clip_task(
                &segment,
                segment_clip_chain.as_ref(),
                root_spatial_node_index,
                pic_context.surface_index,
                frame_context,
                frame_state,
                &mut data_stores.clip,
                device_pixel_scale,
            );
            clip_mask_instances.push(clip_mask_kind);
        }
    }

    Some(clip_task_index)
}
   1505 
/// Build the clip mask task(s) for a primitive instance and record the
/// resulting `ClipTaskIndex` in `instance.vis.clip_task_index`.
///
/// First attempts the optimized per-segment brush path; if that doesn't
/// apply and the clip chain needs a mask, allocates a single mask render
/// task covering the whole primitive. Returns `false` only when the mask
/// would be off-surface or degenerate (the primitive should then be
/// skipped), `true` otherwise.
pub fn update_clip_task(
    instance: &mut PrimitiveInstance,
    prim_origin: &LayoutPoint,
    prim_spatial_node_index: SpatialNodeIndex,
    root_spatial_node_index: SpatialNodeIndex,
    visibility_spatial_node_index: SpatialNodeIndex,
    pic_context: &PictureContext,
    pic_state: &mut PictureState,
    frame_context: &FrameBuildingContext,
    frame_state: &mut FrameBuildingState,
    prim_store: &mut PrimitiveStore,
    data_stores: &mut DataStores,
    scratch: &mut PrimitiveScratchBuffer,
) -> bool {
    let device_pixel_scale = frame_state.surfaces[pic_context.surface_index.0].device_pixel_scale;

    // Lazily build brush segments for primitive kinds that support them
    // (no-op if already built or not applicable).
    build_segments_if_needed(
        instance,
        frame_state,
        prim_store,
        data_stores,
        &mut scratch.segments,
        &mut scratch.segment_instances,
    );

    // First try to render this primitive's mask using optimized brush rendering.
    instance.vis.clip_task_index = if let Some(clip_task_index) = update_clip_task_for_brush(
        instance,
        prim_origin,
        prim_spatial_node_index,
        root_spatial_node_index,
        visibility_spatial_node_index,
        pic_context,
        pic_state,
        frame_context,
        frame_state,
        prim_store,
        data_stores,
        &mut scratch.segments,
        &mut scratch.segment_instances,
        &mut scratch.clip_mask_instances,
        device_pixel_scale,
    ) {
        clip_task_index
    } else if instance.vis.clip_chain.needs_mask {
        // Get a minimal device space rect, clipped to the screen that we
        // need to allocate for the clip mask, as well as interpolated
        // snap offsets.
        let unadjusted_device_rect = match frame_state.surfaces[pic_context.surface_index.0].get_surface_rect(
            &instance.vis.clip_chain.pic_coverage_rect,
            frame_context.spatial_tree,
        ) {
            Some(rect) => rect,
            None => return false,
        };

        // Clamp oversized masks to MAX_MASK_SIZE, compensating via the
        // device pixel scale (shadows the outer binding intentionally).
        let (device_rect, device_pixel_scale) = adjust_mask_scale_for_max_size(
            unadjusted_device_rect,
            device_pixel_scale,
        );

        if device_rect.size().to_i32().is_empty() {
            log::warn!("Bad adjusted clip task size {:?} (was {:?})", device_rect.size(), unadjusted_device_rect.size());
            return false;
        }

        let clip_task_id = RenderTaskKind::new_mask(
            device_rect,
            instance.vis.clip_chain.clips_range,
            root_spatial_node_index,
            frame_state.clip_store,
            &mut frame_state.frame_gpu_data.f32,
            frame_state.resource_cache,
            frame_state.rg_builder,
            &mut data_stores.clip,
            device_pixel_scale,
            frame_context.fb_config,
            &mut frame_state.surface_builder,
        );
        // Set the global clip mask instance for this primitive.
        // (Also assigned via the outer expression below; this early store is
        // redundant but harmless.)
        let clip_task_index = ClipTaskIndex(scratch.clip_mask_instances.len() as _);
        scratch.clip_mask_instances.push(ClipMaskKind::Mask(clip_task_id));
        instance.vis.clip_task_index = clip_task_index;
        // Register the mask task as a dependency of the current surface.
        frame_state.surface_builder.add_child_render_task(
            clip_task_id,
            frame_state.rg_builder,
        );
        clip_task_index
    } else {
        // No mask needed at all.
        ClipTaskIndex::INVALID
    };

    true
}
   1600 
   1601 /// Write out to the clip mask instances array the correct clip mask
   1602 /// config for this segment.
   1603 pub fn update_brush_segment_clip_task(
   1604    segment: &BrushSegment,
   1605    clip_chain: Option<&ClipChainInstance>,
   1606    root_spatial_node_index: SpatialNodeIndex,
   1607    surface_index: SurfaceIndex,
   1608    frame_context: &FrameBuildingContext,
   1609    frame_state: &mut FrameBuildingState,
   1610    clip_data_store: &mut ClipDataStore,
   1611    device_pixel_scale: DevicePixelScale,
   1612 ) -> ClipMaskKind {
   1613    let clip_chain = match clip_chain {
   1614        Some(chain) => chain,
   1615        None => return ClipMaskKind::Clipped,
   1616    };
   1617    if !clip_chain.needs_mask ||
   1618       (!segment.may_need_clip_mask && !clip_chain.has_non_local_clips) {
   1619        return ClipMaskKind::None;
   1620    }
   1621 
   1622    let unadjusted_device_rect = match frame_state.surfaces[surface_index.0].get_surface_rect(
   1623        &clip_chain.pic_coverage_rect,
   1624        frame_context.spatial_tree,
   1625    ) {
   1626        Some(rect) => rect,
   1627        None => return ClipMaskKind::Clipped,
   1628    };
   1629 
   1630    let (device_rect, device_pixel_scale) = adjust_mask_scale_for_max_size(unadjusted_device_rect, device_pixel_scale);
   1631 
   1632    if device_rect.size().to_i32().is_empty() {
   1633        log::warn!("Bad adjusted mask size {:?} (was {:?})", device_rect.size(), unadjusted_device_rect.size());
   1634        return ClipMaskKind::Clipped;
   1635    }
   1636 
   1637    let clip_task_id = RenderTaskKind::new_mask(
   1638        device_rect,
   1639        clip_chain.clips_range,
   1640        root_spatial_node_index,
   1641        frame_state.clip_store,
   1642        &mut frame_state.frame_gpu_data.f32,
   1643        frame_state.resource_cache,
   1644        frame_state.rg_builder,
   1645        clip_data_store,
   1646        device_pixel_scale,
   1647        frame_context.fb_config,
   1648        &mut frame_state.surface_builder,
   1649    );
   1650 
   1651    frame_state.surface_builder.add_child_render_task(
   1652        clip_task_id,
   1653        frame_state.rg_builder,
   1654    );
   1655    ClipMaskKind::Mask(clip_task_id)
   1656 }
   1657 
   1658 
/// Feed the segment builder with the primitive rect and every eligible
/// local-space clip from the clip chain.
///
/// Returns `false` when segmentation should be skipped entirely (the
/// primitive is too small to benefit), `true` when `segment_builder` has
/// been initialized and can be asked to build segments.
fn write_brush_segment_description(
    prim_local_rect: LayoutRect,
    prim_local_clip_rect: LayoutRect,
    clip_chain: &ClipChainInstance,
    segment_builder: &mut SegmentBuilder,
    clip_store: &ClipStore,
    data_stores: &DataStores,
) -> bool {
    // If the brush is small, we want to skip building segments
    // and just draw it as a single primitive with clip mask.
    if prim_local_rect.area() < MIN_BRUSH_SPLIT_AREA {
        return false;
    }

    // NOTE: The local clip rect passed to the segment builder must be the unmodified
    //       local clip rect from the clip leaf, not the local_clip_rect from the
    //       clip-chain instance. The clip-chain instance may have been reduced by
    //       clips that are in the same coordinate system, but not the same spatial
    //       node as the primitive. This can result in the clip for the segment building
    //       being affected by scrolling clips, which we can't handle (since the segments
    //       are not invalidated during frame building after being built).
    segment_builder.initialize(
        prim_local_rect,
        None,
        prim_local_clip_rect,
    );

    // Segment the primitive on all the local-space clip sources that we can.
    for i in 0 .. clip_chain.clips_range.count {
        let clip_instance = clip_store
            .get_instance_from_range(&clip_chain.clips_range, i);
        let clip_node = &data_stores.clip[clip_instance.handle];

        // If this clip item is positioned by another positioning node, its relative position
        // could change during scrolling. This means that we would need to resegment. Instead
        // of doing that, only segment with clips that have the same positioning node.
        // TODO(mrobinson, #2858): It may make sense to include these nodes, resegmenting only
        // when necessary while scrolling.
        if !clip_instance.flags.contains(ClipNodeFlags::SAME_SPATIAL_NODE) {
            continue;
        }

        // Translate each clip kind into a (rect, optional radius, mode)
        // triple for the segment builder; box-shadows instead push a mask
        // region directly, and image masks are unsupported on this path.
        let (local_clip_rect, radius, mode) = match clip_node.item.kind {
            ClipItemKind::RoundedRectangle { rect, radius, mode } => {
                (rect, Some(radius), mode)
            }
            ClipItemKind::Rectangle { rect, mode } => {
                (rect, None, mode)
            }
            ClipItemKind::BoxShadow { ref source } => {
                // For inset box shadows, we can clip out any
                // pixels that are inside the shadow region
                // and are beyond the inner rect, as they can't
                // be affected by the blur radius.
                let inner_clip_mode = match source.clip_mode {
                    BoxShadowClipMode::Outset => None,
                    BoxShadowClipMode::Inset => Some(ClipMode::ClipOut),
                };

                // Push a region into the segment builder where the
                // box-shadow can have an effect on the result. This
                // ensures clip-mask tasks get allocated for these
                // pixel regions, even if no other clips affect them.
                segment_builder.push_mask_region(
                    source.prim_shadow_rect,
                    source.prim_shadow_rect.inflate(
                        -0.5 * source.original_alloc_size.width,
                        -0.5 * source.original_alloc_size.height,
                    ),
                    inner_clip_mode,
                );

                continue;
            }
            ClipItemKind::Image { .. } => {
                panic!("bug: masks not supported on old segment path");
            }
        };

        segment_builder.push_clip_rect(local_clip_rect, radius, mode);
    }

    true
}
   1743 
/// Lazily build brush segments for a primitive instance, caching the result
/// in its `segment_instance_index`.
///
/// On return the index is either UNUSED (the primitive kind doesn't segment,
/// segmentation was not worthwhile, or it produced <= 1 segment) or points
/// at a `SegmentedInstance` in `segment_instances_store`. Indices that were
/// already resolved (!= INVALID) are left untouched.
fn build_segments_if_needed(
    instance: &mut PrimitiveInstance,
    frame_state: &mut FrameBuildingState,
    prim_store: &mut PrimitiveStore,
    data_stores: &DataStores,
    segments_store: &mut SegmentStorage,
    segment_instances_store: &mut SegmentInstanceStorage,
) {
    let prim_clip_chain = &instance.vis.clip_chain;

    // Usually, the primitive rect can be found from information
    // in the instance and primitive template.
    let prim_local_rect = data_stores.get_local_prim_rect(
        instance,
        &prim_store.pictures,
        frame_state.surfaces,
    );

    // Locate the mutable segment-instance slot for this primitive kind,
    // early-returning for kinds that never segment (or can't, this frame).
    let segment_instance_index = match instance.kind {
        PrimitiveInstanceKind::Rectangle { use_legacy_path, ref mut segment_instance_index, .. } => {
            // Only legacy-path rectangles reach this function.
            assert!(use_legacy_path);
            segment_instance_index
        }
        PrimitiveInstanceKind::YuvImage { ref mut segment_instance_index, compositor_surface_kind, .. } => {
            // Only use segments for YUV images if not drawing as a compositor surface
            if !compositor_surface_kind.supports_segments() {
                *segment_instance_index = SegmentInstanceIndex::UNUSED;
                return;
            }

            segment_instance_index
        }
        PrimitiveInstanceKind::Image { data_handle, image_instance_index, compositor_surface_kind, .. } => {
            let image_data = &data_stores.image[data_handle].kind;
            let image_instance = &mut prim_store.images[image_instance_index];

            //Note: tiled images don't support automatic segmentation,
            // they strictly produce one segment per visible tile instead.
            if !compositor_surface_kind.supports_segments() ||
                frame_state.resource_cache
                    .get_image_properties(image_data.key)
                    .and_then(|properties| properties.tiling)
                    .is_some()
            {
                image_instance.segment_instance_index = SegmentInstanceIndex::UNUSED;
                return;
            }
            &mut image_instance.segment_instance_index
        }
        PrimitiveInstanceKind::Picture { .. } |
        PrimitiveInstanceKind::TextRun { .. } |
        PrimitiveInstanceKind::NormalBorder { .. } |
        PrimitiveInstanceKind::ImageBorder { .. } |
        PrimitiveInstanceKind::LinearGradient { .. } |
        PrimitiveInstanceKind::CachedLinearGradient { .. } |
        PrimitiveInstanceKind::RadialGradient { .. } |
        PrimitiveInstanceKind::ConicGradient { .. } |
        PrimitiveInstanceKind::LineDecoration { .. } |
        PrimitiveInstanceKind::BackdropCapture { .. } |
        PrimitiveInstanceKind::BackdropRender { .. } => {
            // These primitives don't support / need segments.
            return;
        }
        PrimitiveInstanceKind::BoxShadow { .. } => {
            unreachable!("BUG: box-shadows should not hit legacy brush clip path");
        }
    };

    // INVALID marks "not built yet"; anything else is a cached result.
    if *segment_instance_index == SegmentInstanceIndex::INVALID {
        let mut segments: SmallVec<[BrushSegment; 8]> = SmallVec::new();
        let clip_leaf = frame_state.clip_tree.get_leaf(instance.clip_leaf_id);

        if write_brush_segment_description(
            prim_local_rect,
            clip_leaf.local_clip_rect,
            prim_clip_chain,
            &mut frame_state.segment_builder,
            frame_state.clip_store,
            data_stores,
        ) {
            frame_state.segment_builder.build(|segment| {
                // Store segment rects relative to the primitive origin.
                segments.push(
                    BrushSegment::new(
                        segment.rect.translate(-prim_local_rect.min.to_vector()),
                        segment.has_mask,
                        segment.edge_flags,
                        [0.0; 4],
                        BrushFlags::PERSPECTIVE_INTERPOLATION,
                    ),
                );
            });
        }

        // If only a single segment is produced, there is no benefit to writing
        // a segment instance array. Instead, just use the main primitive rect
        // written into the GPU cache.
        // TODO(gw): This is (sortof) a bandaid - due to a limitation in the current
        //           brush encoding, we can only support a total of up to 2^16 segments.
        //           This should be (more than) enough for any real world case, so for
        //           now we can handle this by skipping cases where we were generating
        //           segments where there is no benefit. The long term / robust fix
        //           for this is to move the segment building to be done as a more
        //           limited nine-patch system during scene building, removing arbitrary
        //           segmentation during frame-building (see bug #1617491).
        if segments.len() <= 1 {
            *segment_instance_index = SegmentInstanceIndex::UNUSED;
        } else {
            let segments_range = segments_store.extend(segments);

            let instance = SegmentedInstance {
                segments_range,
                // GPU data is written later (see write_segment); INVALID
                // until then.
                gpu_data: GpuBufferAddress::INVALID,
            };

            *segment_instance_index = segment_instances_store.push(instance);
        };
    }
}
   1862 
   1863 // Ensures that the size of mask render tasks are within MAX_MASK_SIZE.
   1864 fn adjust_mask_scale_for_max_size(device_rect: DeviceIntRect, device_pixel_scale: DevicePixelScale) -> (DeviceIntRect, DevicePixelScale) {
   1865    if device_rect.width() > MAX_MASK_SIZE || device_rect.height() > MAX_MASK_SIZE {
   1866        // round_out will grow by 1 integer pixel if origin is on a
   1867        // fractional position, so keep that margin for error with -1:
   1868        let device_rect_f = device_rect.to_f32();
   1869        let scale = (MAX_MASK_SIZE - 1) as f32 /
   1870            f32::max(device_rect_f.width(), device_rect_f.height());
   1871        let new_device_pixel_scale = device_pixel_scale * Scale::new(scale);
   1872        let new_device_rect = (device_rect_f * Scale::new(scale))
   1873            .round_out()
   1874            .to_i32();
   1875        (new_device_rect, new_device_pixel_scale)
   1876    } else {
   1877        (device_rect, device_pixel_scale)
   1878    }
   1879 }
   1880 
   1881 impl CompositorSurfaceKind {
   1882    /// Returns true if the compositor surface strategy supports segment rendering
   1883    fn supports_segments(&self) -> bool {
   1884        match self {
   1885            CompositorSurfaceKind::Underlay | CompositorSurfaceKind::Overlay => false,
   1886            CompositorSurfaceKind::Blit => true,
   1887        }
   1888    }
   1889 }