tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

mod.rs (272287B)


      1 /* This Source Code Form is subject to the terms of the Mozilla Public
      2 * License, v. 2.0. If a copy of the MPL was not distributed with this
      3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      4 
      5 //! The high-level module responsible for interfacing with the GPU.
      6 //!
      7 //! Much of WebRender's design is driven by separating work into different
      8 //! threads. To avoid the complexities of multi-threaded GPU access, we restrict
      9 //! all communication with the GPU to one thread, the render thread. But since
     10 //! issuing GPU commands is often a bottleneck, we move everything else (i.e.
     11 //! the computation of what commands to issue) to another thread, the
     12 //! RenderBackend thread. The RenderBackend, in turn, may delegate work to other
     13 //! threads (like the SceneBuilder threads or Rayon workers), but the
     14 //! Render-vs-RenderBackend distinction is the most important.
     15 //!
     16 //! The consumer is responsible for initializing the render thread before
     17 //! calling into WebRender, which means that this module also serves as the
     18 //! initial entry point into WebRender, and is responsible for spawning the
     19 //! various other threads discussed above. That said, WebRender initialization
     20 //! returns both the `Renderer` instance as well as a channel for communicating
     21 //! directly with the `RenderBackend`. Aside from a few high-level operations
     22 //! like 'render now', most of the interesting commands from the consumer go over
     23 //! that channel and operate on the `RenderBackend`.
     24 //!
     25 //! ## Space conversion guidelines
     26 //! At this stage, we should be operating with `DevicePixel` and `FramebufferPixel` only.
     27 //! "Framebuffer" space represents the final destination of our rendering,
     28 //! and it happens to be Y-flipped on OpenGL. The conversion is done as follows:
     29 //!   - for rasterized primitives, the orthographic projection transforms
     30 //! the content rectangle to -1 to 1
     31 //!   - the viewport transformation is setup to map the whole range to
     32 //! the framebuffer rectangle provided by the document view, stored in `DrawTarget`
     33 //!   - all the direct framebuffer operations, like blitting, reading pixels, and setting
     34 //! up the scissor, are accepting already transformed coordinates, which we can get by
     35 //! calling `DrawTarget::to_framebuffer_rect`
     36 
     37 use api::{ClipMode, ColorF, ColorU, MixBlendMode, TextureCacheCategory};
     38 use api::{DocumentId, Epoch, ExternalImageHandler, RenderReasons};
     39 #[cfg(feature = "replay")]
     40 use api::ExternalImageId;
     41 use api::{ExternalImageSource, ExternalImageType, ImageFormat, PremultipliedColorF};
     42 use api::{PipelineId, ImageRendering, Checkpoint, NotificationRequest, ImageBufferKind};
     43 #[cfg(feature = "replay")]
     44 use api::ExternalImage;
     45 use api::FramePublishId;
     46 use api::units::*;
     47 use api::channel::{Sender, Receiver};
     48 pub use api::DebugFlags;
     49 use core::time::Duration;
     50 
     51 use crate::pattern::PatternKind;
     52 use crate::render_api::{DebugCommand, ApiMsg, MemoryReport};
     53 use crate::batch::{AlphaBatchContainer, BatchKind, BatchFeatures, BatchTextures, BrushBatchKind, ClipBatchList};
     54 use crate::batch::ClipMaskInstanceList;
     55 #[cfg(any(feature = "capture", feature = "replay"))]
     56 use crate::capture::{CaptureConfig, ExternalCaptureImage, PlainExternalImage};
     57 use crate::composite::{CompositeState, CompositeTileSurface, CompositorInputLayer, CompositorSurfaceTransform, ResolvedExternalSurface};
     58 use crate::composite::{CompositorKind, Compositor, NativeTileId, CompositeFeatures, CompositeSurfaceFormat, ResolvedExternalSurfaceColorData};
     59 use crate::composite::{CompositorConfig, NativeSurfaceOperationDetails, NativeSurfaceId, NativeSurfaceOperation, ClipRadius};
     60 use crate::composite::{CompositeRoundedCorner, TileKind};
     61 #[cfg(feature = "debugger")]
     62 use api::debugger::{CompositorDebugInfo, DebuggerTextureContent};
     63 use crate::segment::{EdgeAaSegmentMask, SegmentBuilder};
     64 use crate::{debug_colors, CompositorInputConfig, CompositorSurfaceUsage};
     65 use crate::device::{DepthFunction, Device, DrawTarget, ExternalTexture, GpuFrameId, UploadPBOPool};
     66 use crate::device::{ReadTarget, ShaderError, Texture, TextureFilter, TextureFlags, TextureSlot, Texel};
     67 use crate::device::query::{GpuSampler, GpuTimer};
     68 #[cfg(feature = "capture")]
     69 use crate::device::FBOId;
     70 use crate::debug_item::DebugItem;
     71 use crate::frame_builder::Frame;
     72 use glyph_rasterizer::GlyphFormat;
     73 use crate::gpu_types::{ScalingInstance, SVGFEFilterInstance, CopyInstance, PrimitiveInstanceData};
     74 use crate::gpu_types::{BlurInstance, ClearInstance, CompositeInstance, ZBufferId};
     75 use crate::internal_types::{TextureSource, TextureSourceExternal, FrameVec};
     76 #[cfg(any(feature = "capture", feature = "replay"))]
     77 use crate::internal_types::DebugOutput;
     78 use crate::internal_types::{CacheTextureId, FastHashMap, FastHashSet, RenderedDocument, ResultMsg};
     79 use crate::internal_types::{TextureCacheAllocInfo, TextureCacheAllocationKind, TextureUpdateList};
     80 use crate::internal_types::{RenderTargetInfo, Swizzle, DeferredResolveIndex};
     81 use crate::picture::ResolvedSurfaceTexture;
     82 use crate::tile_cache::TileId;
     83 use crate::prim_store::DeferredResolve;
     84 use crate::profiler::{self, RenderCommandLog, GpuProfileTag, TransactionProfile};
     85 use crate::profiler::{Profiler, add_event_marker, add_text_marker, thread_is_being_profiled};
     86 use crate::device::query::GpuProfiler;
     87 use crate::render_target::ResolveOp;
     88 use crate::render_task_graph::RenderTaskGraph;
     89 use crate::render_task::{RenderTask, RenderTaskKind, ReadbackTask};
     90 use crate::screen_capture::AsyncScreenshotGrabber;
     91 use crate::render_target::{RenderTarget, PictureCacheTarget, PictureCacheTargetKind};
     92 use crate::render_target::{RenderTargetKind, BlitJob};
     93 use crate::telemetry::Telemetry;
     94 use crate::tile_cache::PictureCacheDebugInfo;
     95 use crate::util::drain_filter;
     96 use crate::rectangle_occlusion as occlusion;
     97 #[cfg(feature = "debugger")]
     98 use crate::debugger::{Debugger, DebugQueryKind};
     99 use upload::{upload_to_texture_cache, UploadTexturePool};
    100 use init::*;
    101 
    102 use euclid::{rect, Transform3D, Scale, default};
    103 use gleam::gl;
    104 use malloc_size_of::MallocSizeOfOps;
    105 
    106 #[cfg(feature = "replay")]
    107 use std::sync::Arc;
    108 
    109 use std::{
    110    cell::RefCell,
    111    collections::HashSet,
    112    collections::VecDeque,
    113    f32,
    114    ffi::c_void,
    115    mem,
    116    num::NonZeroUsize,
    117    path::PathBuf,
    118    rc::Rc,
    119 };
    120 #[cfg(any(feature = "capture", feature = "replay"))]
    121 use std::collections::hash_map::Entry;
    122 
    123 mod debug;
    124 mod gpu_buffer;
    125 mod shade;
    126 mod vertex;
    127 mod upload;
    128 pub(crate) mod init;
    129 
    130 pub use debug::DebugRenderer;
    131 pub use shade::{PendingShadersToPrecache, Shaders, SharedShaders};
    132 pub use vertex::{desc, VertexArrayKind, MAX_VERTEX_TEXTURE_WIDTH};
    133 pub use gpu_buffer::{GpuBuffer, GpuBufferF, GpuBufferBuilderF, GpuBufferI, GpuBufferBuilderI};
    134 pub use gpu_buffer::{GpuBufferHandle, GpuBufferAddress, GpuBufferBuilder, GpuBufferWriterF};
    135 pub use gpu_buffer::{GpuBufferBlockF, GpuBufferDataF, GpuBufferDataI, GpuBufferWriterI};
    136 
/// The size of the array of each type of vertex data texture that
/// is round-robin-ed each frame during bind_frame_data. Doing this
/// helps avoid driver stalls while updating the texture in some
/// drivers. The size of these textures are typically very small
/// (e.g. < 16 kB) so it's not a huge waste of memory. Despite that,
/// this is a short-term solution - we want to find a better way
/// to provide this frame data, which will likely involve some
/// combination of UBO/SSBO usage. Although this only affects some
/// platforms, it's enabled on all platforms to reduce testing
/// differences between platforms.
pub const VERTEX_DATA_TEXTURE_COUNT: usize = 3;

/// Number of GPU blocks per UV rectangle provided for an image.
pub const BLOCKS_PER_UV_RECT: usize = 2;
    151 
// Tags used to attribute GPU time to the various batches and passes in the
// GPU profiler. By convention the "B_" labels are brush batches and the "C_"
// labels are cache / render-task work.
const GPU_TAG_BRUSH_OPACITY: GpuProfileTag = GpuProfileTag {
    label: "B_Opacity",
    color: debug_colors::DARKMAGENTA,
};
const GPU_TAG_BRUSH_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "B_LinearGradient",
    color: debug_colors::POWDERBLUE,
};
const GPU_TAG_BRUSH_YUV_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_YuvImage",
    color: debug_colors::DARKGREEN,
};
const GPU_TAG_BRUSH_MIXBLEND: GpuProfileTag = GpuProfileTag {
    label: "B_MixBlend",
    color: debug_colors::MAGENTA,
};
const GPU_TAG_BRUSH_BLEND: GpuProfileTag = GpuProfileTag {
    label: "B_Blend",
    color: debug_colors::ORANGE,
};
const GPU_TAG_BRUSH_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_Image",
    color: debug_colors::SPRINGGREEN,
};
const GPU_TAG_BRUSH_SOLID: GpuProfileTag = GpuProfileTag {
    label: "B_Solid",
    color: debug_colors::RED,
};
const GPU_TAG_CACHE_CLIP: GpuProfileTag = GpuProfileTag {
    label: "C_Clip",
    color: debug_colors::PURPLE,
};
const GPU_TAG_CACHE_BORDER: GpuProfileTag = GpuProfileTag {
    label: "C_Border",
    color: debug_colors::CORNSILK,
};
const GPU_TAG_CACHE_LINE_DECORATION: GpuProfileTag = GpuProfileTag {
    label: "C_LineDecoration",
    color: debug_colors::YELLOWGREEN,
};
const GPU_TAG_CACHE_FAST_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_FastLinearGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CACHE_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_LinearGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_Gradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_RADIAL_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_RadialGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CONIC_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_ConicGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_SETUP_TARGET: GpuProfileTag = GpuProfileTag {
    label: "target init",
    color: debug_colors::SLATEGREY,
};
const GPU_TAG_SETUP_DATA: GpuProfileTag = GpuProfileTag {
    label: "data init",
    color: debug_colors::LIGHTGREY,
};
const GPU_TAG_PRIM_SPLIT_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "SplitComposite",
    color: debug_colors::DARKBLUE,
};
const GPU_TAG_PRIM_TEXT_RUN: GpuProfileTag = GpuProfileTag {
    label: "TextRun",
    color: debug_colors::BLUE,
};
const GPU_TAG_PRIMITIVE: GpuProfileTag = GpuProfileTag {
    label: "Primitive",
    color: debug_colors::RED,
};
const GPU_TAG_INDIRECT_PRIM: GpuProfileTag = GpuProfileTag {
    label: "Primitive (indirect)",
    color: debug_colors::YELLOWGREEN,
};
const GPU_TAG_INDIRECT_MASK: GpuProfileTag = GpuProfileTag {
    label: "Mask (indirect)",
    color: debug_colors::IVORY,
};
const GPU_TAG_BLUR: GpuProfileTag = GpuProfileTag {
    label: "Blur",
    color: debug_colors::VIOLET,
};
const GPU_TAG_BLIT: GpuProfileTag = GpuProfileTag {
    label: "Blit",
    color: debug_colors::LIME,
};
const GPU_TAG_SCALE: GpuProfileTag = GpuProfileTag {
    label: "Scale",
    color: debug_colors::GHOSTWHITE,
};
const GPU_SAMPLER_TAG_ALPHA: GpuProfileTag = GpuProfileTag {
    label: "Alpha targets",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_OPAQUE: GpuProfileTag = GpuProfileTag {
    label: "Opaque pass",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_TRANSPARENT: GpuProfileTag = GpuProfileTag {
    label: "Transparent pass",
    color: debug_colors::BLACK,
};
const GPU_TAG_SVG_FILTER_NODES: GpuProfileTag = GpuProfileTag {
    label: "SvgFilterNodes",
    color: debug_colors::LEMONCHIFFON,
};
const GPU_TAG_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "Composite",
    color: debug_colors::TOMATO,
};
    272 
// Key used when adding compositing tiles to the occlusion tracker.
// Since an entire tile may have a mask, but we may segment that in
// to masked and non-masked regions, we need to track which of the
// occlusion tracker outputs need a mask
#[derive(Debug, Copy, Clone)]
struct OcclusionItemKey {
    /// Index of the composite tile this occlusion item was generated from.
    tile_index: usize,
    /// True if this region of the tile must be drawn with a mask applied.
    needs_mask: bool,
}
    282 
// Defines the content that we will draw to a given swapchain / layer, calculated
// after occlusion culling.
struct SwapChainLayer {
    /// Front-to-back occlusion-culling builder; its output keys identify the
    /// source tile and whether the resulting region needs a mask.
    occlusion: occlusion::FrontToBackBuilder<OcclusionItemKey>,
}
    288 
// Store rects state of tile used for compositing with layer compositor
struct CompositeTileState {
    /// Tile rect in picture-local space.
    pub local_rect: PictureRect,
    /// Valid (drawable) portion of the tile in picture-local space.
    pub local_valid_rect: PictureRect,
    /// Clip applied to the tile, in device space.
    pub device_clip_rect: DeviceRect,
    /// Z value used to order this tile during compositing.
    pub z_id: ZBufferId,
    /// Full tile bounds in device space.
    pub device_tile_box: DeviceRect,
    /// Visible sub-rects of the tile; not part of the `same_state` comparison.
    pub visible_rects: Vec<DeviceRect>,
}
    298 
    299 impl CompositeTileState {
    300    pub fn same_state(&self, other: &CompositeTileState) -> bool {
    301        self.local_rect == other.local_rect &&
    302        self.local_valid_rect == other.local_valid_rect &&
    303        self.device_clip_rect == other.device_clip_rect &&
    304        self.z_id == other.z_id &&
    305        self.device_tile_box == other.device_tile_box
    306    }
    307 }
    308 
/// The list of tiles and rects used for compositing to a frame with layer compositor
struct LayerCompositorFrameState {
    /// Per-tile composite state, keyed by tile id.
    tile_states: FastHashMap<TileId, CompositeTileState>,
    /// Device-space rects that are not associated with any tile id.
    pub rects_without_id: Vec<DeviceRect>,
}
    314 
/// The clear color used for the texture cache when the debug display is enabled.
/// We use a shade of blue so that we can still identify completely blue items in
/// the texture cache.
pub const TEXTURE_CACHE_DBG_CLEAR_COLOR: [f32; 4] = [0.0, 0.0, 0.8, 1.0];
    319 
    320 impl BatchKind {
    321    fn sampler_tag(&self) -> GpuProfileTag {
    322        match *self {
    323            BatchKind::SplitComposite => GPU_TAG_PRIM_SPLIT_COMPOSITE,
    324            BatchKind::Brush(kind) => {
    325                match kind {
    326                    BrushBatchKind::Solid => GPU_TAG_BRUSH_SOLID,
    327                    BrushBatchKind::Image(..) => GPU_TAG_BRUSH_IMAGE,
    328                    BrushBatchKind::Blend => GPU_TAG_BRUSH_BLEND,
    329                    BrushBatchKind::MixBlend { .. } => GPU_TAG_BRUSH_MIXBLEND,
    330                    BrushBatchKind::YuvImage(..) => GPU_TAG_BRUSH_YUV_IMAGE,
    331                    BrushBatchKind::LinearGradient => GPU_TAG_BRUSH_LINEAR_GRADIENT,
    332                    BrushBatchKind::Opacity => GPU_TAG_BRUSH_OPACITY,
    333                }
    334            }
    335            BatchKind::TextRun(_) => GPU_TAG_PRIM_TEXT_RUN,
    336            BatchKind::Quad(PatternKind::ColorOrTexture) => GPU_TAG_PRIMITIVE,
    337            BatchKind::Quad(PatternKind::Gradient) => GPU_TAG_GRADIENT,
    338            BatchKind::Quad(PatternKind::RadialGradient) => GPU_TAG_RADIAL_GRADIENT,
    339            BatchKind::Quad(PatternKind::ConicGradient) => GPU_TAG_CONIC_GRADIENT,
    340            BatchKind::Quad(PatternKind::Mask) => GPU_TAG_INDIRECT_MASK,
    341        }
    342    }
    343 }
    344 
    345 fn flag_changed(before: DebugFlags, after: DebugFlags, select: DebugFlags) -> Option<bool> {
    346    if before & select != after & select {
    347        Some(after.contains(select))
    348    } else {
    349        None
    350    }
    351 }
    352 
/// Per-batch color handling mode passed to the shaders.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum ShaderColorMode {
    // NOTE(review): the explicit discriminants and #[repr(C)] suggest these
    // values are mirrored on the GPU side — confirm before renumbering.
    Alpha = 0,
    SubpixelDualSource = 1,
    BitmapShadow = 2,
    ColorBitmap = 3,
    Image = 4,
    MultiplyDualSource = 5,
}
    363 
    364 impl From<GlyphFormat> for ShaderColorMode {
    365    fn from(format: GlyphFormat) -> ShaderColorMode {
    366        match format {
    367            GlyphFormat::Alpha |
    368            GlyphFormat::TransformedAlpha |
    369            GlyphFormat::Bitmap => ShaderColorMode::Alpha,
    370            GlyphFormat::Subpixel | GlyphFormat::TransformedSubpixel => {
    371                panic!("Subpixel glyph formats must be handled separately.");
    372            }
    373            GlyphFormat::ColorBitmap => ShaderColorMode::ColorBitmap,
    374        }
    375    }
    376 }
    377 
/// Enumeration of the texture samplers used across the various WebRender shaders.
///
/// Each variant corresponds to a uniform declared in shader source. We only bind
/// the variants we need for a given shader, so not every variant is bound for every
/// batch.
///
/// Each variant maps to a fixed `TextureSlot` (see the `Into<TextureSlot>`
/// impl below); `Color0`..`Color2` are the three color samplers addressable
/// via `TextureSampler::color`.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) enum TextureSampler {
    Color0,
    Color1,
    Color2,
    TransformPalette,
    RenderTasks,
    Dither,
    PrimitiveHeadersF,
    PrimitiveHeadersI,
    ClipMask,
    GpuBufferF,
    GpuBufferI,
}
    397 
    398 impl TextureSampler {
    399    pub(crate) fn color(n: usize) -> TextureSampler {
    400        match n {
    401            0 => TextureSampler::Color0,
    402            1 => TextureSampler::Color1,
    403            2 => TextureSampler::Color2,
    404            _ => {
    405                panic!("There are only 3 color samplers.");
    406            }
    407        }
    408    }
    409 }
    410 
    411 impl Into<TextureSlot> for TextureSampler {
    412    fn into(self) -> TextureSlot {
    413        match self {
    414            TextureSampler::Color0 => TextureSlot(0),
    415            TextureSampler::Color1 => TextureSlot(1),
    416            TextureSampler::Color2 => TextureSlot(2),
    417            TextureSampler::TransformPalette => TextureSlot(3),
    418            TextureSampler::RenderTasks => TextureSlot(4),
    419            TextureSampler::Dither => TextureSlot(5),
    420            TextureSampler::PrimitiveHeadersF => TextureSlot(6),
    421            TextureSampler::PrimitiveHeadersI => TextureSlot(7),
    422            TextureSampler::ClipMask => TextureSlot(8),
    423            TextureSampler::GpuBufferF => TextureSlot(9),
    424            TextureSampler::GpuBufferI => TextureSlot(10),
    425        }
    426    }
    427 }
    428 
/// The graphics API the renderer is driving. Only OpenGL is represented here.
#[derive(Clone, Debug, PartialEq)]
pub enum GraphicsApi {
    OpenGL,
}
    433 
/// Identifying strings for the graphics API / driver in use.
#[derive(Clone, Debug)]
pub struct GraphicsApiInfo {
    /// Which API family is in use.
    pub kind: GraphicsApi,
    /// Renderer (device/driver) string reported by the API.
    pub renderer: String,
    /// API version string.
    pub version: String,
}
    440 
/// Aggregated GPU timing for one frame.
#[derive(Debug)]
pub struct GpuProfile {
    /// Frame this profile belongs to.
    pub frame_id: GpuFrameId,
    /// Total GPU time across all timers, in nanoseconds.
    pub paint_time_ns: u64,
}
    446 
    447 impl GpuProfile {
    448    fn new(frame_id: GpuFrameId, timers: &[GpuTimer]) -> GpuProfile {
    449        let mut paint_time_ns = 0;
    450        for timer in timers {
    451            paint_time_ns += timer.time_ns;
    452        }
    453        GpuProfile {
    454            frame_id,
    455            paint_time_ns,
    456        }
    457    }
    458 }
    459 
/// Aggregated CPU timing for one frame.
#[derive(Debug)]
pub struct CpuProfile {
    /// Frame this profile belongs to.
    pub frame_id: GpuFrameId,
    /// Time spent in the render backend, in nanoseconds.
    pub backend_time_ns: u64,
    /// Time spent compositing, in nanoseconds.
    pub composite_time_ns: u64,
    /// Number of draw calls issued for the frame.
    pub draw_calls: usize,
}
    467 
    468 impl CpuProfile {
    469    fn new(
    470        frame_id: GpuFrameId,
    471        backend_time_ns: u64,
    472        composite_time_ns: u64,
    473        draw_calls: usize,
    474    ) -> CpuProfile {
    475        CpuProfile {
    476            frame_id,
    477            backend_time_ns,
    478            composite_time_ns,
    479            draw_calls,
    480        }
    481    }
    482 }
    483 
/// The selected partial present mode for a given frame.
#[derive(Debug, Copy, Clone)]
enum PartialPresentMode {
    /// The device supports fewer dirty rects than the number of dirty rects
    /// that WR produced. In this case, the WR dirty rects are union'ed into
    /// a single dirty rect, that is provided to the caller.
    Single {
        /// Union of all WR dirty rects, in device space.
        dirty_rect: DeviceRect,
    },
}
    494 
/// A texture-cache texture together with the category it is accounted under
/// in memory reports.
struct CacheTexture {
    texture: Texture,
    category: TextureCacheCategory,
}
    499 
/// Helper struct for resolving device Textures for use during rendering passes.
///
/// Manages the mapping between the at-a-distance texture handles used by the
/// `RenderBackend` (which does not directly interface with the GPU) and actual
/// device texture handles.
struct TextureResolver {
    /// A map to resolve texture cache IDs to native textures.
    texture_cache_map: FastHashMap<CacheTextureId, CacheTexture>,

    /// Map of external image IDs to native textures. Entries must be resolved
    /// before `bind`/`get_uv_rect` can look them up.
    external_images: FastHashMap<DeferredResolveIndex, ExternalTexture>,

    /// A special 1x1 dummy texture used for shaders that expect to work with
    /// the output of the previous pass but are actually running in the first
    /// pass.
    dummy_cache_texture: Texture,
}
    517 
impl TextureResolver {
    /// Creates a resolver and allocates its 1x1 dummy texture, uploaded as a
    /// single opaque-white RGBA8 texel.
    fn new(device: &mut Device) -> TextureResolver {
        let dummy_cache_texture = device
            .create_texture(
                ImageBufferKind::Texture2D,
                ImageFormat::RGBA8,
                1,
                1,
                TextureFilter::Linear,
                None,
            );
        device.upload_texture_immediate(
            &dummy_cache_texture,
            &[0xff, 0xff, 0xff, 0xff],
        );

        TextureResolver {
            texture_cache_map: FastHashMap::default(),
            external_images: FastHashMap::default(),
            dummy_cache_texture,
        }
    }

    /// Deletes every GPU texture owned by the resolver (the dummy texture and
    /// all texture-cache entries). Consumes `self`.
    fn deinit(self, device: &mut Device) {
        device.delete_texture(self.dummy_cache_texture);

        for (_id, item) in self.texture_cache_map {
            device.delete_texture(item.texture);
        }
    }

    /// Per-frame hook; currently a no-op.
    fn begin_frame(&mut self) {
    }

    /// Called at the end of a render pass with the textures that pass no
    /// longer needs.
    fn end_pass(
        &mut self,
        device: &mut Device,
        textures_to_invalidate: &[CacheTextureId],
    ) {
        // For any texture that is no longer needed, immediately
        // invalidate it so that tiled GPUs don't need to resolve it
        // back to memory.
        for texture_id in textures_to_invalidate {
            let render_target = &self.texture_cache_map[texture_id].texture;
            device.invalidate_render_target(render_target);
        }
    }

    // Bind a source texture to the device.
    // Returns the swizzle the shader must apply when sampling; panics if an
    // external image has not been resolved yet.
    fn bind(&self, texture_id: &TextureSource, sampler: TextureSampler, device: &mut Device) -> Swizzle {
        match *texture_id {
            TextureSource::Invalid => {
                // Nothing to bind; report the default swizzle.
                Swizzle::default()
            }
            TextureSource::Dummy => {
                let swizzle = Swizzle::default();
                device.bind_texture(sampler, &self.dummy_cache_texture, swizzle);
                swizzle
            }
            TextureSource::External(TextureSourceExternal { ref index, .. }) => {
                let texture = self.external_images
                    .get(index)
                    .expect("BUG: External image should be resolved by now");
                device.bind_external_texture(sampler, texture);
                Swizzle::default()
            }
            TextureSource::TextureCache(index, swizzle) => {
                let texture = &self.texture_cache_map[&index].texture;
                device.bind_texture(sampler, texture, swizzle);
                swizzle
            }
        }
    }

    // Get the real (OpenGL) texture ID for a given source texture.
    // For a texture cache texture, the IDs are stored in a vector
    // map for fast access.
    // Returns None for Invalid sources and panics for External sources,
    // which can only be bound, never resolved.
    fn resolve(&self, texture_id: &TextureSource) -> Option<(&Texture, Swizzle)> {
        match *texture_id {
            TextureSource::Invalid => None,
            TextureSource::Dummy => {
                Some((&self.dummy_cache_texture, Swizzle::default()))
            }
            TextureSource::External(..) => {
                panic!("BUG: External textures cannot be resolved, they can only be bound.");
            }
            TextureSource::TextureCache(index, swizzle) => {
                Some((&self.texture_cache_map[&index].texture, swizzle))
            }
        }
    }

    // Retrieve the deferred / resolved UV rect if an external texture, otherwise
    // return the default supplied UV rect.
    fn get_uv_rect(
        &self,
        source: &TextureSource,
        default_value: TexelRect,
    ) -> TexelRect {
        match source {
            TextureSource::External(TextureSourceExternal { ref index, .. }) => {
                let texture = self.external_images
                    .get(index)
                    .expect("BUG: External image should be resolved by now");
                texture.get_uv_rect()
            }
            _ => {
                default_value
            }
        }
    }

    /// Returns the size of the texture in pixels
    fn get_texture_size(&self, texture: &TextureSource) -> DeviceIntSize {
        match *texture {
            TextureSource::Invalid => DeviceIntSize::zero(),
            TextureSource::TextureCache(id, _) => {
                self.texture_cache_map[&id].texture.get_dimensions()
            },
            TextureSource::External(TextureSourceExternal { index, .. }) => {
                // If UV coords are normalized then this value will be incorrect. However, the
                // texture size is currently only used to set the uTextureSize uniform, so that
                // shaders without access to textureSize() can normalize unnormalized UVs. Which
                // means this is not a problem.
                let uv_rect = self.external_images[&index].get_uv_rect();
                (uv_rect.uv1 - uv_rect.uv0).abs().to_size().to_i32()
            },
            TextureSource::Dummy => DeviceIntSize::new(1, 1),
        }
    }

    /// Tallies GPU memory used by texture-cache textures, bucketed by
    /// `TextureCacheCategory`.
    fn report_memory(&self) -> MemoryReport {
        let mut report = MemoryReport::default();

        // We're reporting GPU memory rather than heap-allocations, so we don't
        // use size_of_op.
        for item in self.texture_cache_map.values() {
            let counter = match item.category {
                TextureCacheCategory::Atlas => &mut report.atlas_textures,
                TextureCacheCategory::Standalone => &mut report.standalone_textures,
                TextureCacheCategory::PictureTile => &mut report.picture_tile_textures,
                TextureCacheCategory::RenderTarget => &mut report.render_target_textures,
            };
            *counter += item.texture.size_in_bytes();
        }

        report
    }

    /// Records an estimate of external-image memory into the transaction
    /// profile (in MB).
    fn update_profile(&self, profile: &mut TransactionProfile) {
        let mut external_image_bytes = 0;
        for img in self.external_images.values() {
            let uv_rect = img.get_uv_rect();
            // If UV coords are normalized then this value will be incorrect. This is unfortunate
            // but doesn't impact end users at all.
            let size = (uv_rect.uv1 - uv_rect.uv0).abs().to_size().to_i32();

            // Assume 4 bytes per pixels which is true most of the time but
            // not always.
            let bpp = 4;
            external_image_bytes += size.area() as usize * bpp;
        }

        profile.set(profiler::EXTERNAL_IMAGE_BYTES, profiler::bytes_to_mb(external_image_bytes));
    }

    /// Mutable access to a texture-cache texture; panics if `id` has not been
    /// allocated.
    fn get_cache_texture_mut(&mut self, id: &CacheTextureId) -> &mut Texture {
        &mut self.texture_cache_map
            .get_mut(id)
            .expect("bug: texture not allocated")
            .texture
    }
}
    691 
/// The blending configuration a batch is drawn with. Several variants
/// emulate specific CSS mix-blend-modes (see `from_mix_blend_mode`).
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BlendMode {
    None,
    Alpha,
    PremultipliedAlpha,
    PremultipliedDestOut,
    SubpixelDualSource,
    /// Advanced-blend path carrying the original mix-blend-mode.
    Advanced(MixBlendMode),
    MultiplyDualSource,
    Screen,
    Exclusion,
    PlusLighter,
}
    707 
impl BlendMode {
    /// Decides when a given mix-blend-mode can be implemented in terms of
    /// simple blending, dual-source blending, advanced blending, or not at
    /// all based on available capabilities.
    ///
    /// `advanced_blend`, `coherent` and `dual_source` describe the device's
    /// blending capabilities. Returns `None` when the mode cannot be
    /// expressed with any available blend state (caller must fall back to
    /// brush_mix_blend).
    ///
    /// Note: the match arms below use guards and are order-sensitive — the
    /// coherent-advanced-blend arm must stay first, and the plain
    /// advanced-blend arm must stay after the specific-mode arms.
    pub fn from_mix_blend_mode(
        mode: MixBlendMode,
        advanced_blend: bool,
        coherent: bool,
        dual_source: bool,
    ) -> Option<BlendMode> {
        // If we emulate a mix-blend-mode via simple or dual-source blending,
        // care must be taken to output alpha As + Ad*(1-As) regardless of what
        // the RGB output is to comply with the mix-blend-mode spec.
        Some(match mode {
            // If we have coherent advanced blend, just use that.
            _ if advanced_blend && coherent => BlendMode::Advanced(mode),
            // Screen can be implemented as Cs + Cd - Cs*Cd => Cs + Cd*(1-Cs)
            MixBlendMode::Screen => BlendMode::Screen,
            // Exclusion can be implemented as Cs + Cd - 2*Cs*Cd => Cs*(1-Cd) + Cd*(1-Cs)
            MixBlendMode::Exclusion => BlendMode::Exclusion,
            // PlusLighter is basically a clamped add.
            MixBlendMode::PlusLighter => BlendMode::PlusLighter,
            // Multiply can be implemented as Cs*Cd + Cs*(1-Ad) + Cd*(1-As) => Cs*(1-Ad) + Cd*(1 - SRC1=(As-Cs))
            MixBlendMode::Multiply if dual_source => BlendMode::MultiplyDualSource,
            // Otherwise, use advanced blend without coherency if available.
            _ if advanced_blend => BlendMode::Advanced(mode),
            // If advanced blend is not available, then we have to use brush_mix_blend.
            _ => return None,
        })
    }
}
    739 
/// Information about the state of the debugging / profiler overlay in native compositing mode.
struct DebugOverlayState {
    /// True if any of the current debug flags will result in drawing a debug overlay.
    is_enabled: bool,

    /// The current size of the debug overlay surface. None implies that the
    /// debug surface isn't currently allocated.
    current_size: Option<DeviceIntSize>,

    /// Index of the compositor layer the overlay is drawn on. Starts at 0
    /// (see `new`); presumably updated when the surface is (re)allocated —
    /// TODO(review): confirm against the allocation code.
    layer_index: usize,
}
    751 
    752 impl DebugOverlayState {
    753    fn new() -> Self {
    754        DebugOverlayState {
    755            is_enabled: false,
    756            current_size: None,
    757            layer_index: 0,
    758        }
    759    }
    760 }
    761 
/// Tracks buffer damage rects over a series of frames.
#[derive(Debug, Default)]
pub(crate) struct BufferDamageTracker {
    /// Ring buffer of the damage rects for the last 4 frames,
    /// most recent entry at `current_offset + 1` (wrapping).
    damage_rects: [DeviceRect; 4],
    /// Index into `damage_rects` where the next frame's rect will be written;
    /// decremented (wrapping) by `push_dirty_rect`.
    current_offset: usize,
}
    768 
    769 impl BufferDamageTracker {
    770    /// Sets the damage rect for the current frame. Should only be called *after*
    771    /// get_damage_rect() has been called to get the current backbuffer's damage rect.
    772    fn push_dirty_rect(&mut self, rect: &DeviceRect) {
    773        self.damage_rects[self.current_offset] = rect.clone();
    774        self.current_offset = match self.current_offset {
    775            0 => self.damage_rects.len() - 1,
    776            n => n - 1,
    777        }
    778    }
    779 
    780    /// Gets the damage rect for the current backbuffer, given the backbuffer's age.
    781    /// (The number of frames since it was previously the backbuffer.)
    782    /// Returns an empty rect if the buffer is valid, and None if the entire buffer is invalid.
    783    fn get_damage_rect(&self, buffer_age: usize) -> Option<DeviceRect> {
    784        match buffer_age {
    785            // 0 means this is a new buffer, so is completely invalid.
    786            0 => None,
    787            // 1 means this backbuffer was also the previous frame's backbuffer
    788            // (so must have been copied to the frontbuffer). It is therefore entirely valid.
    789            1 => Some(DeviceRect::zero()),
    790            // We must calculate the union of the damage rects since this buffer was previously
    791            // the backbuffer.
    792            n if n <= self.damage_rects.len() + 1 => {
    793                Some(
    794                    self.damage_rects.iter()
    795                        .cycle()
    796                        .skip(self.current_offset + 1)
    797                        .take(n - 1)
    798                        .fold(DeviceRect::zero(), |acc, r| acc.union(r))
    799                )
    800            }
    801            // The backbuffer is older than the number of frames for which we track,
    802            // so we treat it as entirely invalid.
    803            _ => None,
    804        }
    805    }
    806 }
    807 
/// The renderer is responsible for submitting to the GPU the work prepared by the
/// RenderBackend.
///
/// We have a separate `Renderer` instance for each instance of WebRender (generally
/// one per OS window), and all instances share the same thread.
pub struct Renderer {
    /// Receives `ResultMsg`s (published documents, resource updates, debug
    /// commands, ...) from the render backend; drained in `update()`.
    result_rx: Receiver<ResultMsg>,
    /// Sender for `ApiMsg`s to the render backend.
    api_tx: Sender<ApiMsg>,
    pub device: Device,
    /// Texture update lists queued from result messages, applied later.
    pending_texture_updates: Vec<TextureUpdateList>,
    /// True if there are any TextureCacheUpdate pending.
    pending_texture_cache_updates: bool,
    pending_native_surface_updates: Vec<NativeSurfaceOperation>,
    pending_shader_updates: Vec<PathBuf>,
    active_documents: FastHashMap<DocumentId, RenderedDocument>,

    shaders: Rc<RefCell<Shaders>>,

    max_recorded_profiles: usize,

    clear_color: ColorF,
    enable_clear_scissor: bool,
    enable_advanced_blend_barriers: bool,
    clear_caches_with_quads: bool,
    clear_alpha_targets_with_quads: bool,

    debug: debug::LazyInitializedDebugRenderer,
    debug_flags: DebugFlags,
    profile: TransactionProfile,
    frame_counter: u64,
    resource_upload_time: f64,
    profiler: Profiler,
    command_log: Option<RenderCommandLog>,
    #[cfg(feature = "debugger")]
    debugger: Debugger,

    last_time: u64,

    pub gpu_profiler: GpuProfiler,
    vaos: vertex::RendererVAOs,

    gpu_buffer_texture_f: Option<Texture>,
    gpu_buffer_texture_f_too_large: i32,
    gpu_buffer_texture_i: Option<Texture>,
    gpu_buffer_texture_i_too_large: i32,
    vertex_data_textures: Vec<vertex::VertexDataTextures>,
    current_vertex_data_textures: usize,

    pipeline_info: PipelineInfo,

    // Manages and resolves source texture IDs to real texture IDs.
    texture_resolver: TextureResolver,

    texture_upload_pbo_pool: UploadPBOPool,
    staging_texture_pool: UploadTexturePool,

    dither_matrix_texture: Option<Texture>,

    /// Optional trait object that allows the client
    /// application to provide external buffers for image data.
    external_image_handler: Option<Box<dyn ExternalImageHandler>>,

    /// Optional function pointers for measuring memory used by a given
    /// heap-allocated pointer.
    size_of_ops: Option<MallocSizeOfOps>,

    pub renderer_errors: Vec<RendererError>,

    pub(in crate) async_frame_recorder: Option<AsyncScreenshotGrabber>,
    pub(in crate) async_screenshots: Option<AsyncScreenshotGrabber>,

    /// List of profile results from previous frames. Can be retrieved
    /// via get_frame_profiles().
    cpu_profiles: VecDeque<CpuProfile>,
    gpu_profiles: VecDeque<GpuProfile>,

    /// Notification requests to be fulfilled after rendering.
    notifications: Vec<NotificationRequest>,

    /// Most recent device size passed to `render()`, if any.
    device_size: Option<DeviceIntSize>,

    /// A lazily created texture for the zoom debugging widget.
    zoom_debug_texture: Option<Texture>,

    /// The current mouse position. This is used for debugging
    /// functionality only, such as the debug zoom widget.
    cursor_position: DeviceIntPoint,

    /// Guards to check if we might be rendering a frame with expired texture
    /// cache entries.
    shared_texture_cache_cleared: bool,

    /// The set of documents which we've seen a publish for since last render.
    documents_seen: FastHashSet<DocumentId>,

    #[cfg(feature = "capture")]
    read_fbo: FBOId,
    #[cfg(feature = "replay")]
    owned_external_images: FastHashMap<(ExternalImageId, u8), ExternalTexture>,

    /// The compositing config, affecting how WR composites into the final scene.
    compositor_config: CompositorConfig,
    current_compositor_kind: CompositorKind,

    /// Maintains a set of allocated native composite surfaces. This allows any
    /// currently allocated surfaces to be cleaned up as soon as deinit() is
    /// called (the normal bookkeeping for native surfaces exists in the
    /// render backend thread).
    allocated_native_surfaces: FastHashSet<NativeSurfaceId>,

    /// If true, partial present state has been reset and everything needs to
    /// be drawn on the next render.
    force_redraw: bool,

    /// State related to the debug / profiling overlays
    debug_overlay_state: DebugOverlayState,

    /// Tracks the dirty rectangles from previous frames. Used on platforms
    /// that require keeping the front buffer fully correct when doing
    /// partial present (e.g. unix desktop with EGL_EXT_buffer_age).
    buffer_damage_tracker: BufferDamageTracker,

    max_primitive_instance_count: usize,
    enable_instancing: bool,

    /// Count consecutive oom frames to detect if we are stuck unable to render
    /// in a loop.
    consecutive_oom_frames: u32,

    /// update() defers processing of ResultMsg, if frame_publish_id of
    /// ResultMsg::PublishDocument exceeds target_frame_publish_id.
    target_frame_publish_id: Option<FramePublishId>,

    /// Hold a next ResultMsg that will be handled by update().
    pending_result_msg: Option<ResultMsg>,

    /// Hold previous frame compositing state with layer compositor.
    layer_compositor_frame_state_in_prev_frame: Option<LayerCompositorFrameState>,

    /// Hold DebugItems of DebugFlags::EXTERNAL_COMPOSITE_BORDERS for debug overlay
    external_composite_debug_items: Vec<DebugItem>,
}
    950 
/// Errors the renderer can surface to the embedder via `renderer_errors`
/// or the `Result` of `render()`.
#[derive(Debug)]
pub enum RendererError {
    /// A shader failed to compile or link.
    Shader(ShaderError),
    /// A worker thread could not be spawned or failed.
    Thread(std::io::Error),
    /// The device's maximum texture size is too small.
    MaxTextureSize,
    SoftwareRasterizer,
    OutOfMemory,
}
    959 
    960 impl From<ShaderError> for RendererError {
    961    fn from(err: ShaderError) -> Self {
    962        RendererError::Shader(err)
    963    }
    964 }
    965 
    966 impl From<std::io::Error> for RendererError {
    967    fn from(err: std::io::Error) -> Self {
    968        RendererError::Thread(err)
    969    }
    970 }
    971 
    972 impl Renderer {
    /// Returns the most recent device size passed to `render()`, or `None`
    /// if nothing has been rendered yet.
    pub fn device_size(&self) -> Option<DeviceIntSize> {
        self.device_size
    }
    976 
    977    /// Update the current position of the debug cursor.
    978    pub fn set_cursor_position(
    979        &mut self,
    980        position: DeviceIntPoint,
    981    ) {
    982        self.cursor_position = position;
    983    }
    984 
    /// Returns the maximum texture size supported by the GPU device.
    pub fn get_max_texture_size(&self) -> i32 {
        self.device.max_texture_size()
    }
    988 
    989    pub fn get_graphics_api_info(&self) -> GraphicsApiInfo {
    990        GraphicsApiInfo {
    991            kind: GraphicsApi::OpenGL,
    992            version: self.device.gl().get_string(gl::VERSION),
    993            renderer: self.device.gl().get_string(gl::RENDERER),
    994        }
    995    }
    996 
    /// Returns the device's preferred image format for external color textures.
    pub fn preferred_color_format(&self) -> ImageFormat {
        self.device.preferred_color_formats().external
    }
   1000 
    /// Returns the stride alignment, in bytes, that PBO texture uploads of
    /// the given format must satisfy on this device.
    pub fn required_texture_stride_alignment(&self, format: ImageFormat) -> usize {
        self.device.required_pbo_stride().num_bytes(format).get()
    }
   1004 
    /// Sets the color used when clearing the framebuffer.
    pub fn set_clear_color(&mut self, color: ColorF) {
        self.clear_color = color;
    }
   1008 
   1009    pub fn flush_pipeline_info(&mut self) -> PipelineInfo {
   1010        mem::replace(&mut self.pipeline_info, PipelineInfo::default())
   1011    }
   1012 
   1013    /// Returns the Epoch of the current frame in a pipeline.
   1014    pub fn current_epoch(&self, document_id: DocumentId, pipeline_id: PipelineId) -> Option<Epoch> {
   1015        self.pipeline_info.epochs.get(&(pipeline_id, document_id)).cloned()
   1016    }
   1017 
   1018    fn get_next_result_msg(&mut self) -> Option<ResultMsg> {
   1019        if self.pending_result_msg.is_none() {
   1020            if let Ok(msg) = self.result_rx.try_recv() {
   1021                self.pending_result_msg = Some(msg);
   1022            }
   1023        }
   1024 
   1025        match (&self.pending_result_msg, &self.target_frame_publish_id) {
   1026          (Some(ResultMsg::PublishDocument(frame_publish_id, _, _, _)), Some(target_id)) => {
   1027            if frame_publish_id > target_id {
   1028              return None;
   1029            }
   1030          }
   1031          _ => {}
   1032        }
   1033 
   1034        self.pending_result_msg.take()
   1035    }
   1036 
    /// Processes the result queue.
    ///
    /// Should be called before `render()`, as texture cache updates are done here.
    pub fn update(&mut self) {
        profile_scope!("update");

        // Drain all currently available result messages (get_next_result_msg
        // may hold back a PublishDocument past the target publish id).
        while let Some(msg) = self.get_next_result_msg() {
            match msg {
                ResultMsg::PublishPipelineInfo(mut pipeline_info) => {
                    // Merge the new epochs into our accumulated pipeline info.
                    for ((pipeline_id, document_id), epoch) in pipeline_info.epochs {
                        self.pipeline_info.epochs.insert((pipeline_id, document_id), epoch);
                    }
                    self.pipeline_info.removed_pipelines.extend(pipeline_info.removed_pipelines.drain(..));
                }
                ResultMsg::PublishDocument(
                    _,
                    document_id,
                    mut doc,
                    resource_update_list,
                ) => {
                    // Add a new document to the active set

                    // If the document we are replacing must be drawn (in order to
                    // update the texture cache), issue a render just to
                    // off-screen targets, ie pass None to render_impl. We do this
                    // because a) we don't need to render to the main framebuffer
                    // so it is cheaper not to, and b) doing so without a
                    // subsequent present would break partial present.
                    let prev_frame_memory = if let Some(mut prev_doc) = self.active_documents.remove(&document_id) {
                        doc.profile.merge(&mut prev_doc.profile);

                        if prev_doc.frame.must_be_drawn() {
                            prev_doc.render_reasons |= RenderReasons::TEXTURE_CACHE_FLUSH;
                            self.render_impl(
                                document_id,
                                &mut prev_doc,
                                None,
                                0,
                            ).ok();
                        }

                        Some(prev_doc.frame.allocator_memory)
                    } else {
                        None
                    };

                    if let Some(memory) = prev_frame_memory {
                        // We just dropped the frame a few lines above. There should be no
                        // live allocations left in the frame's memory.
                        memory.assert_memory_reusable();
                    }

                    self.active_documents.insert(document_id, doc);

                    // IMPORTANT: The pending texture cache updates must be applied
                    //            *after* the previous frame has been rendered above
                    //            (if necessary for a texture cache update). For
                    //            an example of why this is required:
                    //            1) Previous frame contains a render task that
                    //               targets Texture X.
                    //            2) New frame contains a texture cache update which
                    //               frees Texture X.
                    //            3) bad stuff happens.

                    //TODO: associate `document_id` with target window
                    self.pending_texture_cache_updates |= !resource_update_list.texture_updates.updates.is_empty();
                    self.pending_texture_updates.push(resource_update_list.texture_updates);
                    self.pending_native_surface_updates.extend(resource_update_list.native_surface_updates);
                    self.documents_seen.insert(document_id);
                }
                ResultMsg::UpdateResources {
                    resource_updates,
                    memory_pressure,
                } => {
                    if memory_pressure {
                        // If a memory pressure event arrives _after_ a new scene has
                        // been published that writes persistent targets (i.e. cached
                        // render tasks to the texture cache, or picture cache tiles)
                        // but _before_ the next update/render loop, those targets
                        // will not be updated due to the active_documents list being
                        // cleared at the end of this message. To work around that,
                        // if any of the existing documents have not rendered yet, and
                        // have picture/texture cache targets, force a render so that
                        // those targets are updated.
                        let active_documents = mem::replace(
                            &mut self.active_documents,
                            FastHashMap::default(),
                        );
                        for (doc_id, mut doc) in active_documents {
                            if doc.frame.must_be_drawn() {
                                // As this render will not be presented, we must pass None to
                                // render_impl. This avoids interfering with partial present
                                // logic, as well as being more efficient.
                                self.render_impl(
                                    doc_id,
                                    &mut doc,
                                    None,
                                    0,
                                ).ok();
                            }
                        }
                    }

                    self.pending_texture_cache_updates |= !resource_updates.texture_updates.updates.is_empty();
                    self.pending_texture_updates.push(resource_updates.texture_updates);
                    self.pending_native_surface_updates.extend(resource_updates.native_surface_updates);
                    // Texture deletion below must happen inside a frame
                    // (the device module asserts otherwise).
                    self.device.begin_frame();

                    self.update_texture_cache();
                    self.update_native_surfaces();

                    // Flush the render target pool on memory pressure.
                    //
                    // This needs to be separate from the block below because
                    // the device module asserts if we delete textures while
                    // not in a frame.
                    if memory_pressure {
                        self.texture_upload_pbo_pool.on_memory_pressure(&mut self.device);
                        self.staging_texture_pool.delete_textures(&mut self.device);
                        if let Some(texture) = self.gpu_buffer_texture_f.take() {
                            self.device.delete_texture(texture);
                        }
                        if let Some(texture) = self.gpu_buffer_texture_i.take() {
                            self.device.delete_texture(texture);
                        }
                    }

                    self.device.end_frame();
                }
                ResultMsg::RenderDocumentOffscreen(document_id, mut offscreen_doc, resources) => {
                    // Flush pending operations if needed (See comment in the match arm for
                    // PublishPipelineInfo).

                    // Borrow-ck dance.
                    let prev_doc = self.active_documents.remove(&document_id);
                    if let Some(mut prev_doc) = prev_doc {
                        if prev_doc.frame.must_be_drawn() {
                            prev_doc.render_reasons |= RenderReasons::TEXTURE_CACHE_FLUSH;
                            self.render_impl(
                                document_id,
                                &mut prev_doc,
                                None,
                                0,
                            ).ok();
                        }

                        self.active_documents.insert(document_id, prev_doc);
                    }

                    // Now update resources and render the offscreen frame.

                    self.pending_texture_cache_updates |= !resources.texture_updates.updates.is_empty();
                    self.pending_texture_updates.push(resources.texture_updates);
                    self.pending_native_surface_updates.extend(resources.native_surface_updates);

                    self.render_impl(
                        document_id,
                        &mut offscreen_doc,
                        None,
                        0,
                    ).unwrap();
                }
                ResultMsg::AppendNotificationRequests(mut notifications) => {
                    // We need to know specifically if there are any pending
                    // TextureCacheUpdate updates in any of the entries in
                    // pending_texture_updates. They may simply be nops, which do not
                    // need to prevent issuing the notification, and if so, may not
                    // cause a timely frame render to occur to wake up any listeners.
                    if !self.pending_texture_cache_updates {
                        drain_filter(
                            &mut notifications,
                            |n| { n.when() == Checkpoint::FrameTexturesUpdated },
                            |n| { n.notify(); },
                        );
                    }
                    self.notifications.append(&mut notifications);
                }
                ResultMsg::ForceRedraw => {
                    self.force_redraw = true;
                }
                ResultMsg::RefreshShader(path) => {
                    self.pending_shader_updates.push(path);
                }
                ResultMsg::SetParameter(ref param) => {
                    self.device.set_parameter(param);
                    self.profiler.set_parameter(param);
                }
                ResultMsg::DebugOutput(output) => match output {
                    #[cfg(feature = "capture")]
                    DebugOutput::SaveCapture(config, deferred) => {
                        self.save_capture(config, deferred);
                    }
                    #[cfg(feature = "replay")]
                    DebugOutput::LoadCapture(config, plain_externals) => {
                        self.active_documents.clear();
                        self.load_capture(config, plain_externals);
                    }
                },
                ResultMsg::DebugCommand(command) => {
                    self.handle_debug_command(command);
                }
            }
        }
    }
   1242 
    /// update() defers processing of ResultMsg, if frame_publish_id of
    /// ResultMsg::PublishDocument exceeds target_frame_publish_id.
    ///
    /// Setting a new target allows deferred publishes up to and including
    /// `publish_id` to be processed by subsequent `update()` calls.
    pub fn set_target_frame_publish_id(&mut self, publish_id: FramePublishId) {
        self.target_frame_publish_id = Some(publish_id);
    }
   1248 
    /// Executes a `DebugCommand` delivered via the result queue.
    ///
    /// Commands that must be handled on the render backend thread (and
    /// should never reach the renderer) deliberately panic here.
    fn handle_debug_command(&mut self, command: DebugCommand) {
        match command {
            DebugCommand::SetPictureTileSize(_) |
            DebugCommand::SetMaximumSurfaceSize(_) |
            DebugCommand::GenerateFrame => {
                panic!("Should be handled by render backend");
            }
            #[cfg(feature = "debugger")]
            DebugCommand::Query(ref query) => {
                match query.kind {
                    DebugQueryKind::SpatialTree { .. } => {
                        panic!("Should be handled by render backend");
                    }
                    DebugQueryKind::CompositorConfig { .. } => {
                        // Report the composite state of the most recently added document.
                        let result = match self.active_documents.iter().last() {
                            Some((_, doc)) => {
                                doc.frame.composite_state.print_to_string()
                            }
                            None => {
                                "No active documents".into()
                            }
                        };
                        query.result.send(result).ok();
                    }
                    DebugQueryKind::CompositorView { .. } => {
                        let result = match self.active_documents.iter().last() {
                            Some((_, doc)) => {
                                let info = CompositorDebugInfo::from(&doc.frame.composite_state);
                                serde_json::to_string(&info).unwrap()
                            }
                            None => {
                                "No active documents".into()
                            }
                        };
                        query.result.send(result).ok();
                    }
                    DebugQueryKind::Textures { category } => {
                        let mut texture_list = Vec::new();

                        // Read back every matching cache texture's pixels via the read FBO.
                        self.device.begin_frame();
                        self.device.bind_read_target_impl(self.read_fbo, DeviceIntPoint::zero());

                        for (id, item) in &self.texture_resolver.texture_cache_map {
                            // Apply the optional category filter from the query.
                            if category.is_some() && category != Some(item.category) {
                                continue;
                            }

                            let size = item.texture.get_dimensions();
                            let format = item.texture.get_format();
                            let buffer_size = (size.area() * format.bytes_per_pixel()) as usize;
                            let mut data = vec![0u8; buffer_size];
                            let rect = size.cast_unit().into();
                            self.device.attach_read_texture(&item.texture);
                            self.device.read_pixels_into(rect, format, &mut data);

                            let category_str = match item.category {
                                TextureCacheCategory::Atlas => "atlas",
                                TextureCacheCategory::Standalone => "standalone",
                                TextureCacheCategory::PictureTile => "tile",
                                TextureCacheCategory::RenderTarget => "target",
                            };

                            let texture_msg = DebuggerTextureContent {
                                name: format!("{category_str}-{:02}", id.0),
                                category: item.category,
                                width: size.width as u32,
                                height: size.height as u32,
                                format,
                                data,
                            };
                            texture_list.push(texture_msg);
                        }
                        self.device.reset_read_target();
                        self.device.end_frame();

                        query.result.send(serde_json::to_string(&texture_list).unwrap()).ok();
                    }
                }
            }
            DebugCommand::SaveCapture(..) |
            DebugCommand::LoadCapture(..) |
            DebugCommand::StartCaptureSequence(..) |
            DebugCommand::StopCaptureSequence => {
                panic!("Capture commands are not welcome here! Did you build with 'capture' feature?")
            }
            // These are handled entirely elsewhere; nothing for the renderer to do.
            DebugCommand::ClearCaches(_)
            | DebugCommand::SimulateLongSceneBuild(_)
            | DebugCommand::EnableNativeCompositor(_)
            | DebugCommand::SetBatchingLookback(_) => {}
            DebugCommand::SetFlags(flags) => {
                self.set_debug_flags(flags);
            }
            DebugCommand::GetDebugFlags(tx) => {
                tx.send(self.debug_flags).unwrap();
            }
            DebugCommand::SetRenderCommandLog(enabled) => {
                // Enabling is idempotent: keep an existing log if one is already active.
                if enabled && self.command_log.is_none() {
                    self.command_log = Some(RenderCommandLog::new());
                } else if !enabled {
                    self.command_log = None;
                }
            }
            #[cfg(feature = "debugger")]
            DebugCommand::AddDebugClient(client) => {
                self.debugger.add_client(
                    client,
                    self.debug_flags,
                    &self.profiler,
                );
            }
        }
    }
   1361 
   1362    /// Set a callback for handling external images.
   1363    pub fn set_external_image_handler(&mut self, handler: Box<dyn ExternalImageHandler>) {
   1364        self.external_image_handler = Some(handler);
   1365    }
   1366 
   1367    /// Retrieve (and clear) the current list of recorded frame profiles.
   1368    pub fn get_frame_profiles(&mut self) -> (Vec<CpuProfile>, Vec<GpuProfile>) {
   1369        let cpu_profiles = self.cpu_profiles.drain(..).collect();
   1370        let gpu_profiles = self.gpu_profiles.drain(..).collect();
   1371        (cpu_profiles, gpu_profiles)
   1372    }
   1373 
   1374    /// Reset the current partial present state. This forces the entire framebuffer
   1375    /// to be refreshed next time `render` is called.
   1376    pub fn force_redraw(&mut self) {
   1377        self.force_redraw = true;
   1378    }
   1379 
   1380    /// Renders the current frame.
   1381    ///
   1382    /// A Frame is supplied by calling [`generate_frame()`][webrender_api::Transaction::generate_frame].
   1383    /// buffer_age is the age of the current backbuffer. It is only relevant if partial present
   1384    /// is active, otherwise 0 should be passed here.
   1385    pub fn render(
   1386        &mut self,
   1387        device_size: DeviceIntSize,
   1388        buffer_age: usize,
   1389    ) -> Result<RenderResults, Vec<RendererError>> {
   1390        self.device_size = Some(device_size);
   1391 
   1392        // TODO(gw): We want to make the active document that is
   1393        //           being rendered configurable via the public
   1394        //           API in future. For now, just select the last
   1395        //           added document as the active one to render
   1396        //           (Gecko only ever creates a single document
   1397        //           per renderer right now).
   1398        let doc_id = self.active_documents.keys().last().cloned();
   1399 
   1400        let result = match doc_id {
   1401            Some(doc_id) => {
   1402                // Remove the doc from the map to appease the borrow checker
   1403                let mut doc = self.active_documents
   1404                    .remove(&doc_id)
   1405                    .unwrap();
   1406 
   1407                let size = if !device_size.is_empty() {
   1408                    Some(device_size)
   1409                } else {
   1410                    None
   1411                };
   1412 
   1413                let result = self.render_impl(
   1414                    doc_id,
   1415                    &mut doc,
   1416                    size,
   1417                    buffer_age,
   1418                );
   1419 
   1420                self.active_documents.insert(doc_id, doc);
   1421 
   1422                result
   1423            }
   1424            None => {
   1425                self.last_time = zeitstempel::now();
   1426                Ok(RenderResults::default())
   1427            }
   1428        };
   1429 
   1430        drain_filter(
   1431            &mut self.notifications,
   1432            |n| { n.when() == Checkpoint::FrameRendered },
   1433            |n| { n.notify(); },
   1434        );
   1435 
   1436        let mut oom = false;
   1437        if let Err(ref errors) = result {
   1438            for error in errors {
   1439                if matches!(error, &RendererError::OutOfMemory) {
   1440                    oom = true;
   1441                    break;
   1442                }
   1443            }
   1444        }
   1445 
   1446        if oom {
   1447            let _ = self.api_tx.send(ApiMsg::MemoryPressure);
   1448            // Ensure we don't get stuck in a loop.
   1449            self.consecutive_oom_frames += 1;
   1450            assert!(self.consecutive_oom_frames < 5, "Renderer out of memory");
   1451        } else {
   1452            self.consecutive_oom_frames = 0;
   1453        }
   1454 
   1455        // This is the end of the rendering pipeline. If some notifications are is still there,
   1456        // just clear them and they will autimatically fire the Checkpoint::TransactionDropped
   1457        // event. Otherwise they would just pile up in this vector forever.
   1458        self.notifications.clear();
   1459 
   1460        self.external_composite_debug_items = Vec::new();
   1461 
   1462        tracy_frame_marker!();
   1463 
   1464        result
   1465    }
   1466 
   1467    /// Update the state of any debug / profiler overlays. This is currently only needed
   1468    /// when running with the native compositor enabled.
   1469    fn update_debug_overlay(
   1470        &mut self,
   1471        framebuffer_size: DeviceIntSize,
   1472        has_debug_items: bool,
   1473    ) {
   1474        // If any of the following debug flags are set, something will be drawn on the debug overlay.
   1475        self.debug_overlay_state.is_enabled = has_debug_items || self.debug_flags.intersects(
   1476            DebugFlags::PROFILER_DBG |
   1477            DebugFlags::RENDER_TARGET_DBG |
   1478            DebugFlags::TEXTURE_CACHE_DBG |
   1479            DebugFlags::EPOCHS |
   1480            DebugFlags::PICTURE_CACHING_DBG |
   1481            DebugFlags::PICTURE_BORDERS |
   1482            DebugFlags::ZOOM_DBG |
   1483            DebugFlags::WINDOW_VISIBILITY_DBG |
   1484            DebugFlags::EXTERNAL_COMPOSITE_BORDERS
   1485        );
   1486 
   1487        // Update the debug overlay surface, if we are running in native compositor mode.
   1488        if let CompositorKind::Native { .. } = self.current_compositor_kind {
   1489            let compositor = self.compositor_config.compositor().unwrap();
   1490 
   1491            // If there is a current surface, destroy it if we don't need it for this frame, or if
   1492            // the size has changed.
   1493            if let Some(current_size) = self.debug_overlay_state.current_size {
   1494                if !self.debug_overlay_state.is_enabled || current_size != framebuffer_size {
   1495                    compositor.destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
   1496                    self.debug_overlay_state.current_size = None;
   1497                }
   1498            }
   1499 
   1500            // Allocate a new surface, if we need it and there isn't one.
   1501            if self.debug_overlay_state.is_enabled && self.debug_overlay_state.current_size.is_none() {
   1502                compositor.create_surface(
   1503                    &mut self.device,
   1504                    NativeSurfaceId::DEBUG_OVERLAY,
   1505                    DeviceIntPoint::zero(),
   1506                    framebuffer_size,
   1507                    false,
   1508                );
   1509                compositor.create_tile(
   1510                    &mut self.device,
   1511                    NativeTileId::DEBUG_OVERLAY,
   1512                );
   1513                self.debug_overlay_state.current_size = Some(framebuffer_size);
   1514            }
   1515        }
   1516    }
   1517 
   1518    /// Bind a draw target for the debug / profiler overlays, if required.
   1519    fn bind_debug_overlay(&mut self, device_size: DeviceIntSize) -> Option<DrawTarget> {
   1520        // Debug overlay setup are only required in native compositing mode
   1521        if self.debug_overlay_state.is_enabled {
   1522            match self.current_compositor_kind {
   1523                CompositorKind::Native { .. } => {
   1524                    let compositor = self.compositor_config.compositor().unwrap();
   1525                    let surface_size = self.debug_overlay_state.current_size.unwrap();
   1526 
   1527                    // Ensure old surface is invalidated before binding
   1528                    compositor.invalidate_tile(
   1529                        &mut self.device,
   1530                        NativeTileId::DEBUG_OVERLAY,
   1531                        DeviceIntRect::from_size(surface_size),
   1532                    );
   1533                    // Bind the native surface
   1534                    let surface_info = compositor.bind(
   1535                        &mut self.device,
   1536                        NativeTileId::DEBUG_OVERLAY,
   1537                        DeviceIntRect::from_size(surface_size),
   1538                        DeviceIntRect::from_size(surface_size),
   1539                    );
   1540 
   1541                    // Bind the native surface to current FBO target
   1542                    let draw_target = DrawTarget::NativeSurface {
   1543                        offset: surface_info.origin,
   1544                        external_fbo_id: surface_info.fbo_id,
   1545                        dimensions: surface_size,
   1546                    };
   1547                    self.device.bind_draw_target(draw_target);
   1548 
   1549                    // When native compositing, clear the debug overlay each frame.
   1550                    self.device.clear_target(
   1551                        Some([0.0, 0.0, 0.0, 0.0]),
   1552                        None, // debug renderer does not use depth
   1553                        None,
   1554                    );
   1555 
   1556                    Some(draw_target)
   1557                }
   1558                CompositorKind::Layer { .. } => {
   1559                    let compositor = self.compositor_config.layer_compositor().unwrap();
   1560                    compositor.bind_layer(self.debug_overlay_state.layer_index, &[]);
   1561 
   1562                    self.device.clear_target(
   1563                        Some([0.0, 0.0, 0.0, 0.0]),
   1564                        None, // debug renderer does not use depth
   1565                        None,
   1566                    );
   1567 
   1568                    Some(DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left()))
   1569                }
   1570                CompositorKind::Draw { .. } => {
   1571                    // If we're not using the native compositor, then the default
   1572                    // frame buffer is already bound. Create a DrawTarget for it and
   1573                    // return it.
   1574                    Some(DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left()))
   1575                }
   1576            }
   1577        } else {
   1578            None
   1579        }
   1580    }
   1581 
   1582    /// Unbind the draw target for debug / profiler overlays, if required.
   1583    fn unbind_debug_overlay(&mut self) {
   1584        // Debug overlay setup are only required in native compositing mode
   1585        if self.debug_overlay_state.is_enabled {
   1586            match self.current_compositor_kind {
   1587                CompositorKind::Native { .. } => {
   1588                    let compositor = self.compositor_config.compositor().unwrap();
   1589                    // Unbind the draw target and add it to the visual tree to be composited
   1590                    compositor.unbind(&mut self.device);
   1591 
   1592                    let clip_rect = DeviceIntRect::from_size(
   1593                        self.debug_overlay_state.current_size.unwrap(),
   1594                    );
   1595 
   1596                    compositor.add_surface(
   1597                        &mut self.device,
   1598                        NativeSurfaceId::DEBUG_OVERLAY,
   1599                        CompositorSurfaceTransform::identity(),
   1600                        clip_rect,
   1601                        ImageRendering::Auto,
   1602                        clip_rect,
   1603                        ClipRadius::EMPTY,
   1604                    );
   1605                }
   1606                CompositorKind::Draw { .. } => {}
   1607                CompositorKind::Layer { .. } => {
   1608                    let compositor = self.compositor_config.layer_compositor().unwrap();
   1609                    compositor.present_layer(self.debug_overlay_state.layer_index, &[]);
   1610                }
   1611            }
   1612        }
   1613    }
   1614 
    // Renders one document's frame and (optionally) composites/presents it.
    //
    // If device_size is None, don't render to the main frame buffer. This is useful to
    // update texture cache render tasks but avoid doing a full frame render. If the
    // render is not going to be presented, then this must be set to None, as performing a
    // composite without a present will confuse partial present.
    fn render_impl(
        &mut self,
        doc_id: DocumentId,
        active_doc: &mut RenderedDocument,
        mut device_size: Option<DeviceIntSize>,
        buffer_age: usize,
    ) -> Result<RenderResults, Vec<RendererError>> {
        profile_scope!("render");
        let mut results = RenderResults::default();
        self.profile.end_time_if_started(profiler::FRAME_SEND_TIME);
        self.profile.start_time(profiler::RENDERER_TIME);

        // Start each frame with an empty command log, if logging is enabled.
        if let Some(log) = &mut self.command_log {
            log.clear();
        }

        self.staging_texture_pool.begin_frame();

        let compositor_kind = active_doc.frame.composite_state.compositor_kind;
        // The frame may have been built for a different compositor kind than the
        // renderer is currently using (e.g. native compositing was toggled via the
        // debug API). Switch modes before drawing, tearing down the native debug
        // overlay surface when leaving native mode.
        if self.current_compositor_kind != compositor_kind {
            let enable = match (self.current_compositor_kind, compositor_kind) {
                (CompositorKind::Native { .. }, CompositorKind::Draw { .. }) => {
                    if self.debug_overlay_state.current_size.is_some() {
                        self.compositor_config
                            .compositor()
                            .unwrap()
                            .destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
                        self.debug_overlay_state.current_size = None;
                    }
                    false
                }
                (CompositorKind::Draw { .. }, CompositorKind::Native { .. }) => {
                    true
                }
                (current_compositor_kind, active_doc_compositor_kind) => {
                    warn!("Compositor mismatch, assuming this is Wrench running. Current {:?}, active {:?}",
                        current_compositor_kind, active_doc_compositor_kind);
                    false
                }
            };

            if let Some(config) = self.compositor_config.compositor() {
                config.enable_native_compositor(&mut self.device, enable);
            }
            self.current_compositor_kind = compositor_kind;
        }

        // The texture resolver scope should be outside of any rendering, including
        // debug rendering. This ensures that when we return render targets to the
        // pool via glInvalidateFramebuffer, we don't do any debug rendering after
        // that point. Otherwise, the bind / invalidate / bind logic trips up the
        // render pass logic in tiled / mobile GPUs, resulting in an extra copy /
        // resolve step when the debug overlay is enabled.
        self.texture_resolver.begin_frame();

        if let Some(device_size) = device_size {
            self.update_gpu_profile(device_size);
        }

        // Begin the GPU frame, reset basic draw state, and flush any pending
        // texture cache / native surface updates before drawing.
        let cpu_frame_id = {
            let _gm = self.gpu_profiler.start_marker("begin frame");
            let frame_id = self.device.begin_frame();
            self.gpu_profiler.begin_frame(frame_id);

            self.device.disable_scissor();
            self.device.disable_depth();
            self.set_blend(false, FramebufferKind::Main);
            //self.update_shaders();

            self.update_texture_cache();
            self.update_native_surfaces();

            frame_id
        };

        if !active_doc.frame.present {
            // Setting device_size to None is what ensures compositing/presenting
            // the frame is skipped in the rest of this module.
            device_size = None;
        }

        if let Some(device_size) = device_size {
            // Inform the client that we are starting a composition transaction if native
            // compositing is enabled. This needs to be done early in the frame, so that
            // we can create debug overlays after drawing the main surfaces.
            if let CompositorKind::Native { .. } = self.current_compositor_kind {
                let compositor = self.compositor_config.compositor().unwrap();
                compositor.begin_frame(&mut self.device);
            }

            // Update the state of the debug overlay surface, ensuring that
            // the compositor mode has a suitable surface to draw to, if required.
            self.update_debug_overlay(device_size, !active_doc.frame.debug_items.is_empty());
        }

        let frame = &mut active_doc.frame;
        let profile = &mut active_doc.profile;
        assert!(self.current_compositor_kind == frame.composite_state.compositor_kind);

        if self.shared_texture_cache_cleared {
            assert!(self.documents_seen.contains(&doc_id),
                    "Cleared texture cache without sending new document frame.");
        }

        self.update_deferred_resolves(&frame.deferred_resolves, &mut frame.gpu_buffer_f);

        // Issue the actual GPU commands for this frame.
        self.draw_frame(
            frame,
            device_size,
            buffer_age,
            &mut results,
        );

        // TODO(nical): do this automatically by selecting counters in the wr profiler
        // Profile marker for the number of invalidated picture cache
        if thread_is_being_profiled() {
            let duration = Duration::new(0,0);
            if let Some(n) = self.profile.get(profiler::RENDERED_PICTURE_TILES) {
                let message = (n as usize).to_string();
                add_text_marker("NumPictureCacheInvalidated", &message, duration);
            }
        }

        if device_size.is_some() {
            self.draw_frame_debug_items(&frame.debug_items);
        }

        // Fold the document's frame-build profile into the renderer's counters.
        self.profile.merge(profile);

        self.unlock_external_images(&frame.deferred_resolves);

        let _gm = self.gpu_profiler.start_marker("end frame");
        self.gpu_profiler.end_frame();

        let t = self.profile.end_time(profiler::RENDERER_TIME);
        self.profile.end_time_if_started(profiler::TOTAL_FRAME_CPU_TIME);

        let current_time = zeitstempel::now();
        if device_size.is_some() {
            let time = profiler::ns_to_ms(current_time - self.last_time);
            self.profile.set(profiler::FRAME_TIME, time);
        }

        // Bind a surface and draw the debug / profiler information, if enabled.
        // `debug_overlay` remains Some for as long as the overlay target is bound.
        let debug_overlay = device_size.and_then(|device_size| {
            // Bind a surface to draw the debug / profiler information to.
            self.bind_debug_overlay(device_size).map(|draw_target| {
                self.draw_render_target_debug(&draw_target);
                self.draw_texture_cache_debug(&draw_target);
                self.draw_zoom_debug(device_size);
                self.draw_epoch_debug();
                self.draw_window_visibility_debug();
                self.draw_external_composite_borders_debug();
                draw_target
            })
        });

        // `t` is in milliseconds; convert to microseconds for telemetry.
        Telemetry::record_renderer_time(Duration::from_micros((t * 1000.00) as u64));
        if self.profile.get(profiler::SHADER_BUILD_TIME).is_none() {
          Telemetry::record_renderer_time_no_sc(Duration::from_micros((t * 1000.00) as u64));
        }

        // Record this frame's CPU profile into the bounded ring buffer, if enabled.
        if self.max_recorded_profiles > 0 {
            while self.cpu_profiles.len() >= self.max_recorded_profiles {
                self.cpu_profiles.pop_front();
            }
            let cpu_profile = CpuProfile::new(
                cpu_frame_id,
                (self.profile.get_or(profiler::FRAME_BUILDING_TIME, 0.0) * 1000000.0) as u64,
                (self.profile.get_or(profiler::RENDERER_TIME, 0.0) * 1000000.0) as u64,
                self.profile.get_or(profiler::DRAW_CALLS, 0.0) as usize,
            );
            self.cpu_profiles.push_back(cpu_profile);
        }

        if thread_is_being_profiled() {
            let duration = Duration::new(0,0);
            let message = (self.profile.get_or(profiler::DRAW_CALLS, 0.0) as usize).to_string();
            add_text_marker("NumDrawCalls", &message, duration);
        }

        // Publish per-category texture memory usage to the profiler counters.
        let report = self.texture_resolver.report_memory();
        self.profile.set(profiler::RENDER_TARGET_MEM, profiler::bytes_to_mb(report.render_target_textures));
        self.profile.set(profiler::PICTURE_TILES_MEM, profiler::bytes_to_mb(report.picture_tile_textures));
        self.profile.set(profiler::ATLAS_TEXTURES_MEM, profiler::bytes_to_mb(report.atlas_textures));
        self.profile.set(profiler::STANDALONE_TEXTURES_MEM, profiler::bytes_to_mb(report.standalone_textures));

        self.profile.set(profiler::DEPTH_TARGETS_MEM, profiler::bytes_to_mb(self.device.depth_targets_memory()));

        self.profile.set(profiler::TEXTURES_CREATED, self.device.textures_created);
        self.profile.set(profiler::TEXTURES_DELETED, self.device.textures_deleted);

        results.stats.texture_upload_mb = self.profile.get_or(profiler::TEXTURE_UPLOADS_MEM, 0.0);
        self.frame_counter += 1;
        results.stats.resource_upload_time = self.resource_upload_time;
        self.resource_upload_time = 0.0;

        if let Some(stats) = active_doc.frame_stats.take() {
          // Copy the full frame stats to RendererStats
          results.stats.merge(&stats);

          self.profiler.update_frame_stats(stats);
        }

        // Turn the render reasons bitflags into something we can see in the profiler.
        // For now this is just a binary yes/no for each bit, which means that when looking
        // at "Render reasons" in the profiler HUD the average view indicates the proportion
        // of frames that had the bit set over a half second window whereas max shows whether
        // the bit as been set at least once during that time window.
        // We could implement better ways to visualize this information.
        let add_markers = thread_is_being_profiled();
        for i in 0..RenderReasons::NUM_BITS {
            let counter = profiler::RENDER_REASON_FIRST + i as usize;
            let mut val = 0.0;
            let reason_bit = RenderReasons::from_bits_truncate(1 << i);
            if active_doc.render_reasons.contains(reason_bit) {
                val = 1.0;
                if add_markers {
                    let event_str = format!("Render reason {:?}", reason_bit);
                    add_event_marker(&event_str);
                }
            }
            self.profile.set(counter, val);
        }
        active_doc.render_reasons = RenderReasons::empty();


        self.texture_resolver.update_profile(&mut self.profile);

        // Note: this clears the values in self.profile.
        self.profiler.set_counters(&mut self.profile);

        // If debugger is enabled, collect any profiler updates before value is overwritten
        // during update below.
        #[cfg(feature = "debugger")]
        self.debugger.update(
            self.debug_flags,
            &self.profiler,
            &self.command_log,
        );

        // Note: profile counters must be set before this or they will count for next frame.
        self.profiler.update();

        if self.debug_flags.intersects(DebugFlags::PROFILER_DBG | DebugFlags::PROFILER_CAPTURE) {
            if let Some(device_size) = device_size {
                //TODO: take device/pixel ratio into equation?
                if let Some(debug_renderer) = self.debug.get_mut(&mut self.device) {
                    self.profiler.draw_profile(
                        self.frame_counter,
                        debug_renderer,
                        device_size,
                    );
                }
            }
        }

        if self.debug_flags.contains(DebugFlags::ECHO_DRIVER_MESSAGES) {
            self.device.echo_driver_messages();
        }

        if let Some(debug_renderer) = self.debug.try_get_mut() {
            let small_screen = self.debug_flags.contains(DebugFlags::SMALL_SCREEN);
            let scale = if small_screen { 1.6 } else { 1.0 };
            // TODO(gw): Tidy this up so that compositor config integrates better
            //           with the (non-compositor) surface y-flip options.
            let surface_origin_is_top_left = match self.current_compositor_kind {
                CompositorKind::Native { .. } => true,
                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => self.device.surface_origin_is_top_left(),
            };
            // If there is a debug overlay, render it. Otherwise, just clear
            // the debug renderer.
            debug_renderer.render(
                &mut self.device,
                debug_overlay.and(device_size),
                scale,
                surface_origin_is_top_left,
            );
        }

        self.staging_texture_pool.end_frame(&mut self.device);
        self.texture_upload_pbo_pool.end_frame(&mut self.device);
        self.device.end_frame();

        if debug_overlay.is_some() {
            self.last_time = current_time;

            // Unbind the target for the debug overlay. No debug or profiler drawing
            // can occur after this point.
            self.unbind_debug_overlay();
        }

        if device_size.is_some() {
            // Inform the client that we are finished this composition transaction if native
            // compositing is enabled. This must be called after any debug / profiling compositor
            // surfaces have been drawn and added to the visual tree.
            match self.current_compositor_kind {
                CompositorKind::Layer { .. } => {
                    let compositor = self.compositor_config.layer_compositor().unwrap();
                    compositor.end_frame();
                }
                CompositorKind::Native { .. } => {
                    profile_scope!("compositor.end_frame");
                    let compositor = self.compositor_config.compositor().unwrap();
                    compositor.end_frame(&mut self.device);
                }
                CompositorKind::Draw { .. } => {}
            }
        }

        self.documents_seen.clear();
        self.shared_texture_cache_cleared = false;

        self.check_gl_errors();

        // Any errors accumulated during the frame are handed back to the caller,
        // leaving the error list empty for the next frame.
        if self.renderer_errors.is_empty() {
            Ok(results)
        } else {
            Err(mem::replace(&mut self.renderer_errors, Vec::new()))
        }
    }
   1940 
   1941    fn update_gpu_profile(&mut self, device_size: DeviceIntSize) {
   1942        let _gm = self.gpu_profiler.start_marker("build samples");
   1943        // Block CPU waiting for last frame's GPU profiles to arrive.
   1944        // In general this shouldn't block unless heavily GPU limited.
   1945        let (gpu_frame_id, timers, samplers) = self.gpu_profiler.build_samples();
   1946 
   1947        if self.max_recorded_profiles > 0 {
   1948            while self.gpu_profiles.len() >= self.max_recorded_profiles {
   1949                self.gpu_profiles.pop_front();
   1950            }
   1951 
   1952            self.gpu_profiles.push_back(GpuProfile::new(gpu_frame_id, &timers));
   1953        }
   1954 
   1955        self.profiler.set_gpu_time_queries(timers);
   1956 
   1957        if !samplers.is_empty() {
   1958            let screen_fraction = 1.0 / device_size.to_f32().area();
   1959 
   1960            fn accumulate_sampler_value(description: &str, samplers: &[GpuSampler]) -> f32 {
   1961                let mut accum = 0.0;
   1962                for sampler in samplers {
   1963                    if sampler.tag.label != description {
   1964                        continue;
   1965                    }
   1966 
   1967                    accum += sampler.count as f32;
   1968                }
   1969 
   1970                accum
   1971            }
   1972 
   1973            let alpha_targets = accumulate_sampler_value(&"Alpha targets", &samplers) * screen_fraction;
   1974            let transparent_pass = accumulate_sampler_value(&"Transparent pass", &samplers) * screen_fraction;
   1975            let opaque_pass = accumulate_sampler_value(&"Opaque pass", &samplers) * screen_fraction;
   1976            self.profile.set(profiler::ALPHA_TARGETS_SAMPLERS, alpha_targets);
   1977            self.profile.set(profiler::TRANSPARENT_PASS_SAMPLERS, transparent_pass);
   1978            self.profile.set(profiler::OPAQUE_PASS_SAMPLERS, opaque_pass);
   1979            self.profile.set(profiler::TOTAL_SAMPLERS, alpha_targets + transparent_pass + opaque_pass);
   1980        }
   1981    }
   1982 
    /// Applies all pending texture cache updates received from the render backend.
    ///
    /// For each queued update list this performs, in order:
    ///   * texture-to-texture copies, drawn with the `ps_copy` shader,
    ///   * allocations / resets / frees of cache textures — a freed texture
    ///     whose allocation parameters exactly match a requested alloc or
    ///     reset is reused instead of being deleted and recreated,
    ///   * pixel uploads into the cache textures.
    ///
    /// Create/delete times are recorded in the profiler, the total update
    /// time is added to `resource_upload_time` and reported to telemetry,
    /// and any notifications waiting on `Checkpoint::FrameTexturesUpdated`
    /// are fired at the end.
    fn update_texture_cache(&mut self) {
        profile_scope!("update_texture_cache");

        let _gm = self.gpu_profiler.start_marker("texture cache update");
        // Take ownership of the queued update lists so we can drain them below.
        let mut pending_texture_updates = mem::replace(&mut self.pending_texture_updates, vec![]);
        self.pending_texture_cache_updates = false;

        self.profile.start_time(profiler::TEXTURE_CACHE_UPDATE_TIME);

        // Accumulated (per-frame) texture create/delete durations, in the
        // units returned by zeitstempel::now().
        let mut create_cache_texture_time = 0;
        let mut delete_cache_texture_time = 0;

        for update_list in pending_texture_updates.drain(..) {
            // Handle copies from one texture to another.
            for ((src_tex, dst_tex), copies) in &update_list.copies {

                let dest_texture = &self.texture_resolver.texture_cache_map[&dst_tex].texture;
                let dst_texture_size = dest_texture.get_dimensions().to_f32();

                let mut copy_instances = Vec::new();
                for copy in copies {
                    copy_instances.push(CopyInstance {
                        src_rect: copy.src_rect.to_f32(),
                        dst_rect: copy.dst_rect.to_f32(),
                        dst_texture_size,
                    });
                }

                let draw_target = DrawTarget::from_texture(dest_texture, false);
                self.device.bind_draw_target(draw_target);

                self.shaders
                    .borrow_mut()
                    .ps_copy()
                    .bind(
                        &mut self.device,
                        &Transform3D::identity(),
                        None,
                        &mut self.renderer_errors,
                        &mut self.profile,
                        &mut self.command_log,
                    );

                self.draw_instanced_batch(
                    &copy_instances,
                    VertexArrayKind::Copy,
                    &BatchTextures::composite_rgb(
                        TextureSource::TextureCache(*src_tex, Swizzle::default())
                    ),
                    &mut RendererStats::default(),
                );
            }

            // Find any textures that will need to be deleted in this group of allocations.
            let mut pending_deletes = Vec::new();
            for allocation in &update_list.allocations {
                let old = self.texture_resolver.texture_cache_map.remove(&allocation.id);
                // The backend tells us what it expects: an Alloc must not
                // shadow an existing texture, while Reset/Free must refer
                // to one that the renderer actually has.
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(_) => {
                        assert!(old.is_none(), "Renderer and backend disagree!");
                    }
                    TextureCacheAllocationKind::Reset(_) |
                    TextureCacheAllocationKind::Free => {
                        assert!(old.is_some(), "Renderer and backend disagree!");
                    }
                }
                if let Some(old) = old {

                    // Regenerate the cache allocation info so we can search through deletes for reuse.
                    let size = old.texture.get_dimensions();
                    let info = TextureCacheAllocInfo {
                        width: size.width,
                        height: size.height,
                        format: old.texture.get_format(),
                        filter: old.texture.get_filter(),
                        target: old.texture.get_target(),
                        is_shared_cache: old.texture.flags().contains(TextureFlags::IS_SHARED_TEXTURE_CACHE),
                        has_depth: old.texture.supports_depth(),
                        category: old.category,
                    };
                    pending_deletes.push((old.texture, info));
                }
            }
            // Look for any alloc or reset that has matching alloc info and save it from being deleted.
            // reused_textures lines up (by push order) with the Alloc/Reset
            // entries in update_list.allocations, holding Some(texture) when
            // a deleted texture could be recycled for that entry.
            let mut reused_textures = VecDeque::with_capacity(pending_deletes.len());
            for allocation in &update_list.allocations {
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(ref info) |
                    TextureCacheAllocationKind::Reset(ref info) => {
                        reused_textures.push_back(
                            pending_deletes.iter()
                                .position(|(_, old_info)| *old_info == *info)
                                .map(|index| pending_deletes.swap_remove(index).0)
                        );
                    }
                    TextureCacheAllocationKind::Free => {}
                }
            }

            // Now that we've saved as many deletions for reuse as we can, actually delete whatever is left.
            if !pending_deletes.is_empty() {
                let delete_texture_start = zeitstempel::now();
                for (texture, _) in pending_deletes {
                    add_event_marker("TextureCacheFree");
                    self.device.delete_texture(texture);
                }
                delete_cache_texture_time += zeitstempel::now() - delete_texture_start;
            }

            // Perform the allocations and resets, preferring a recycled
            // texture from reused_textures when one was found above.
            for allocation in update_list.allocations {
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(_) => add_event_marker("TextureCacheAlloc"),
                    TextureCacheAllocationKind::Reset(_) => add_event_marker("TextureCacheReset"),
                    TextureCacheAllocationKind::Free => {}
                };
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(ref info) |
                    TextureCacheAllocationKind::Reset(ref info) => {
                        let create_cache_texture_start = zeitstempel::now();
                        // Create a new native texture, as requested by the texture cache.
                        // If we managed to reuse a deleted texture, then prefer that instead.
                        //
                        // Ensure no PBO is bound when creating the texture storage,
                        // or GL will attempt to read data from there.
                        let mut texture = reused_textures.pop_front().unwrap_or(None).unwrap_or_else(|| {
                            self.device.create_texture(
                                info.target,
                                info.format,
                                info.width,
                                info.height,
                                info.filter,
                                // This needs to be a render target because some render
                                // tasks get rendered into the texture cache.
                                Some(RenderTargetInfo { has_depth: info.has_depth }),
                            )
                        });

                        if info.is_shared_cache {
                            texture.flags_mut()
                                .insert(TextureFlags::IS_SHARED_TEXTURE_CACHE);

                            // On Mali-Gxx devices we use batched texture uploads as it performs much better.
                            // However, due to another driver bug we must ensure the textures are fully cleared,
                            // otherwise we get visual artefacts when blitting to the texture cache.
                            if self.device.use_batched_texture_uploads() &&
                                !self.device.get_capabilities().supports_render_target_partial_update
                            {
                                self.clear_texture(&texture, [0.0; 4]);
                            }

                            // Textures in the cache generally don't need to be cleared,
                            // but we do so if the debug display is active to make it
                            // easier to identify unallocated regions.
                            if self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
                                self.clear_texture(&texture, TEXTURE_CACHE_DBG_CLEAR_COLOR);
                            }
                        }

                        create_cache_texture_time += zeitstempel::now() - create_cache_texture_start;

                        self.texture_resolver.texture_cache_map.insert(allocation.id, CacheTexture {
                            texture,
                            category: info.category,
                        });
                    }
                    TextureCacheAllocationKind::Free => {}
                };
            }

            // With all allocations in place, push the pixel data.
            upload_to_texture_cache(self, update_list.updates);

            self.check_gl_errors();
        }

        if create_cache_texture_time > 0 {
            self.profile.set(
                profiler::CREATE_CACHE_TEXTURE_TIME,
                profiler::ns_to_ms(create_cache_texture_time)
            );
        }
        if delete_cache_texture_time > 0 {
            self.profile.set(
                profiler::DELETE_CACHE_TEXTURE_TIME,
                profiler::ns_to_ms(delete_cache_texture_time)
            )
        }

        // `t` is in ms (see ns_to_ms usage above) — convert to µs for telemetry.
        let t = self.profile.end_time(profiler::TEXTURE_CACHE_UPDATE_TIME);
        self.resource_upload_time += t;
        Telemetry::record_texture_cache_update_time(Duration::from_micros((t * 1000.00) as u64));

        // Fire-and-remove any notifications waiting for this checkpoint.
        drain_filter(
            &mut self.notifications,
            |n| { n.when() == Checkpoint::FrameTexturesUpdated },
            |n| { n.notify(); },
        );
    }
   2180 
   2181    fn check_gl_errors(&mut self) {
   2182        let err = self.device.gl().get_error();
   2183        if err == gl::OUT_OF_MEMORY {
   2184            self.renderer_errors.push(RendererError::OutOfMemory);
   2185        }
   2186 
   2187        // Probably should check for other errors?
   2188    }
   2189 
   2190    fn bind_textures(&mut self, textures: &BatchTextures) {
   2191        for i in 0 .. 3 {
   2192            self.texture_resolver.bind(
   2193                &textures.input.colors[i],
   2194                TextureSampler::color(i),
   2195                &mut self.device,
   2196            );
   2197        }
   2198 
   2199        self.texture_resolver.bind(
   2200            &textures.clip_mask,
   2201            TextureSampler::ClipMask,
   2202            &mut self.device,
   2203        );
   2204 
   2205        // TODO: this probably isn't the best place for this.
   2206        if let Some(ref texture) = self.dither_matrix_texture {
   2207            self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
   2208        }
   2209    }
   2210 
   2211    fn draw_instanced_batch<T: Clone>(
   2212        &mut self,
   2213        data: &[T],
   2214        vertex_array_kind: VertexArrayKind,
   2215        textures: &BatchTextures,
   2216        stats: &mut RendererStats,
   2217    ) {
   2218        if let Some(history) = &mut self.command_log {
   2219            history.draw(data.len() as u32);
   2220        }
   2221 
   2222        self.bind_textures(textures);
   2223 
   2224        // If we end up with an empty draw call here, that means we have
   2225        // probably introduced unnecessary batch breaks during frame
   2226        // building - so we should be catching this earlier and removing
   2227        // the batch.
   2228        debug_assert!(!data.is_empty());
   2229 
   2230        let vao = &self.vaos[vertex_array_kind];
   2231        self.device.bind_vao(vao);
   2232 
   2233        let chunk_size = if self.debug_flags.contains(DebugFlags::DISABLE_BATCHING) {
   2234            1
   2235        } else if vertex_array_kind == VertexArrayKind::Primitive {
   2236            self.max_primitive_instance_count
   2237        } else {
   2238            data.len()
   2239        };
   2240 
   2241        for chunk in data.chunks(chunk_size) {
   2242            if self.enable_instancing {
   2243                self.device
   2244                    .update_vao_instances(vao, chunk, ONE_TIME_USAGE_HINT, None);
   2245                self.device
   2246                    .draw_indexed_triangles_instanced_u16(6, chunk.len() as i32);
   2247            } else {
   2248                self.device
   2249                    .update_vao_instances(vao, chunk, ONE_TIME_USAGE_HINT, NonZeroUsize::new(4));
   2250                self.device
   2251                    .draw_indexed_triangles(6 * chunk.len() as i32);
   2252            }
   2253            self.profile.inc(profiler::DRAW_CALLS);
   2254            stats.total_draw_calls += 1;
   2255        }
   2256 
   2257        self.profile.add(profiler::VERTICES, 6 * data.len());
   2258    }
   2259 
    /// Copies a region of the backdrop surface into a readback task's
    /// texture, for use as an input to a composite operation.
    ///
    /// The rect actually blitted is the intersection of the wanted readback
    /// region and the portion of the backdrop available in `draw_target`.
    /// Dummy readbacks (ones without an origin) are a no-op, since the
    /// target clear already leaves the task rect at zero alpha. Scissor is
    /// temporarily disabled around the blit when `uses_scissor` is set, and
    /// the draw/read targets are restored before returning.
    fn handle_readback_composite(
        &mut self,
        draw_target: DrawTarget,
        uses_scissor: bool,
        backdrop: &RenderTask,
        readback: &RenderTask,
    ) {
        // Extract the rectangle in the backdrop surface's device space of where
        // we need to read from.
        let readback_origin = match readback.kind {
            RenderTaskKind::Readback(ReadbackTask { readback_origin: Some(o), .. }) => o,
            RenderTaskKind::Readback(ReadbackTask { readback_origin: None, .. }) => {
                // If this is a dummy readback, just early out. We know that the
                // clear of the target will ensure the task rect is already zero alpha,
                // so it won't affect the rendering output.
                return;
            }
            _ => unreachable!(),
        };

        if uses_scissor {
            self.device.disable_scissor();
        }

        // Resolve the texture that the readback task renders into.
        let texture_source = TextureSource::TextureCache(
            readback.get_target_texture(),
            Swizzle::default(),
        );
        let (cache_texture, _) = self.texture_resolver
            .resolve(&texture_source).expect("bug: no source texture");

        // Before submitting the composite batch, do the
        // framebuffer readbacks that are needed for each
        // composite operation in this batch.
        let readback_rect = readback.get_target_rect();
        let backdrop_rect = backdrop.get_target_rect();
        let (backdrop_screen_origin, _) = match backdrop.kind {
            RenderTaskKind::Picture(ref task_info) => (task_info.content_origin, task_info.device_pixel_scale),
            _ => panic!("bug: composite on non-picture?"),
        };

        // Bind the FBO to blit the backdrop to.
        // Called per-instance in case the FBO changes. The device will skip
        // the GL call if the requested target is already bound.
        let cache_draw_target = DrawTarget::from_texture(
            cache_texture,
            false,
        );

        // Get the rect that we ideally want, in space of the parent surface
        let wanted_rect = DeviceRect::from_origin_and_size(
            readback_origin,
            readback_rect.size().to_f32(),
        );

        // Get the rect that is available on the parent surface. It may be smaller
        // than desired because this is a picture cache tile covering only part of
        // the wanted rect and/or because the parent surface was clipped.
        let avail_rect = DeviceRect::from_origin_and_size(
            backdrop_screen_origin,
            backdrop_rect.size().to_f32(),
        );

        if let Some(int_rect) = wanted_rect.intersection(&avail_rect) {
            // If there is a valid intersection, work out the correct origins and
            // sizes of the copy rects, and do the blit.
            let copy_size = int_rect.size().to_i32();

            // Source: the intersection, expressed relative to the backdrop's
            // location within the draw target.
            let src_origin = backdrop_rect.min.to_f32() +
                int_rect.min.to_vector() -
                backdrop_screen_origin.to_vector();

            let src = DeviceIntRect::from_origin_and_size(
                src_origin.to_i32(),
                copy_size,
            );

            // Destination: the same intersection, expressed relative to the
            // readback task's rect in the cache texture.
            let dest_origin = readback_rect.min.to_f32() +
                int_rect.min.to_vector() -
                readback_origin.to_vector();

            let dest = DeviceIntRect::from_origin_and_size(
                dest_origin.to_i32(),
                copy_size,
            );

            // Should always be drawing to picture cache tiles or off-screen surface!
            debug_assert!(!draw_target.is_default());
            // 1:1 device-to-framebuffer scale, since we asserted above that
            // this is not the default framebuffer.
            let device_to_framebuffer = Scale::new(1i32);

            self.device.blit_render_target(
                draw_target.into(),
                src * device_to_framebuffer,
                cache_draw_target,
                dest * device_to_framebuffer,
                TextureFilter::Linear,
            );
        }

        // Restore draw target to current pass render target, and reset
        // the read target.
        self.device.bind_draw_target(draw_target);
        self.device.reset_read_target();

        if uses_scissor {
            self.device.enable_scissor();
        }
    }
   2368 
   2369    fn handle_resolves(
   2370        &mut self,
   2371        resolve_ops: &[ResolveOp],
   2372        render_tasks: &RenderTaskGraph,
   2373        draw_target: DrawTarget,
   2374    ) {
   2375        if resolve_ops.is_empty() {
   2376            return;
   2377        }
   2378 
   2379        let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLIT);
   2380 
   2381        for resolve_op in resolve_ops {
   2382            self.handle_resolve(
   2383                resolve_op,
   2384                render_tasks,
   2385                draw_target,
   2386            );
   2387        }
   2388 
   2389        self.device.reset_read_target();
   2390    }
   2391 
    /// Draws quad primitive instances for the current target.
    ///
    /// Unscissored instances are drawn first with blending disabled, grouped
    /// by pattern kind (the outer slice index) and then by texture source.
    /// Scissored instances follow with premultiplied-alpha blending enabled,
    /// re-binding the pattern shader only when the pattern changes between
    /// groups. Depth writes are disabled for all of these draws.
    fn handle_prims(
        &mut self,
        draw_target: &DrawTarget,
        prim_instances: &[FastHashMap<TextureSource, FrameVec<PrimitiveInstanceData>>],
        prim_instances_with_scissor: &FastHashMap<(DeviceIntRect, PatternKind), FastHashMap<TextureSource, FrameVec<PrimitiveInstanceData>>>,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        self.device.disable_depth_write();

        let has_prim_instances = prim_instances.iter().any(|map| !map.is_empty());
        if has_prim_instances || !prim_instances_with_scissor.is_empty() {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_INDIRECT_PRIM);

            // Unscissored instances draw without blending.
            self.set_blend(false, FramebufferKind::Other);

            // The slice index encodes the pattern kind.
            for (pattern_idx, prim_instances_map) in prim_instances.iter().enumerate() {
                if prim_instances_map.is_empty() {
                    continue;
                }
                let pattern = PatternKind::from_u32(pattern_idx as u32);

                self.shaders.borrow_mut().get_quad_shader(pattern).bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

                // One draw per texture source within this pattern.
                for (texture_source, prim_instances) in prim_instances_map {
                    let texture_bindings = BatchTextures::composite_rgb(*texture_source);

                    self.draw_instanced_batch(
                        prim_instances,
                        VertexArrayKind::Primitive,
                        &texture_bindings,
                        stats,
                    );
                }
            }

            if !prim_instances_with_scissor.is_empty() {
                // Scissored instances blend with premultiplied alpha.
                self.set_blend(true, FramebufferKind::Other);
                self.device.set_blend_mode_premultiplied_alpha();
                self.device.enable_scissor();

                // Track the last pattern bound so we only re-bind the shader
                // when the pattern actually changes.
                let mut prev_pattern = None;

                for ((scissor_rect, pattern), prim_instances_map) in prim_instances_with_scissor {
                    if prev_pattern != Some(*pattern) {
                        prev_pattern = Some(*pattern);
                        self.shaders.borrow_mut().get_quad_shader(*pattern).bind(
                            &mut self.device,
                            projection,
                            None,
                            &mut self.renderer_errors,
                            &mut self.profile,
                            &mut self.command_log,
                        );
                    }

                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));

                    for (texture_source, prim_instances) in prim_instances_map {
                        let texture_bindings = BatchTextures::composite_rgb(*texture_source);

                        self.draw_instanced_batch(
                            prim_instances,
                            VertexArrayKind::Primitive,
                            &texture_bindings,
                            stats,
                        );
                    }
                }

                self.device.disable_scissor();
            }
        }
    }
   2473 
    /// Draws the clip mask instances for the current target.
    ///
    /// All masks are drawn with multiply blending and depth writes disabled.
    /// Each category — fast rectangle masks, image masks, and slow masks —
    /// is drawn in two variants: unscissored, then with per-group scissor
    /// rects (scissor is enabled/disabled around each scissored group).
    fn handle_clips(
        &mut self,
        draw_target: &DrawTarget,
        masks: &ClipMaskInstanceList,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        self.device.disable_depth_write();

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_INDIRECT_MASK);

            // Masks multiply into the existing mask values.
            self.set_blend(true, FramebufferKind::Other);
            self.set_blend_mode_multiply(FramebufferKind::Other);

            // Fast-path rectangle masks, unscissored.
            if !masks.mask_instances_fast.is_empty() {
                self.shaders.borrow_mut().ps_mask_fast().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

                self.draw_instanced_batch(
                    &masks.mask_instances_fast,
                    VertexArrayKind::Mask,
                    &BatchTextures::empty(),
                    stats,
                );
            }

            // Fast-path rectangle masks, one scissor rect per group.
            if !masks.mask_instances_fast_with_scissor.is_empty() {
                self.shaders.borrow_mut().ps_mask_fast().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

                self.device.enable_scissor();

                for (scissor_rect, instances) in &masks.mask_instances_fast_with_scissor {
                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));

                    self.draw_instanced_batch(
                        instances,
                        VertexArrayKind::Mask,
                        &BatchTextures::empty(),
                        stats,
                    );
                }

                self.device.disable_scissor();
            }

            // Image masks, unscissored, grouped by source texture.
            if !masks.image_mask_instances.is_empty() {
                self.shaders.borrow_mut().ps_quad_textured().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

                for (texture, prim_instances) in &masks.image_mask_instances {
                    self.draw_instanced_batch(
                        prim_instances,
                        VertexArrayKind::Primitive,
                        &BatchTextures::composite_rgb(*texture),
                        stats,
                    );
                }
            }

            // Image masks with per-group scissor rects.
            if !masks.image_mask_instances_with_scissor.is_empty() {
                self.device.enable_scissor();

                self.shaders.borrow_mut().ps_quad_textured().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

                for ((scissor_rect, texture), prim_instances) in &masks.image_mask_instances_with_scissor {
                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));

                    self.draw_instanced_batch(
                        prim_instances,
                        VertexArrayKind::Primitive,
                        &BatchTextures::composite_rgb(*texture),
                        stats,
                    );
                }

                self.device.disable_scissor();
            }

            // Slow-path masks, unscissored.
            if !masks.mask_instances_slow.is_empty() {
                self.shaders.borrow_mut().ps_mask().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

                self.draw_instanced_batch(
                    &masks.mask_instances_slow,
                    VertexArrayKind::Mask,
                    &BatchTextures::empty(),
                    stats,
                );
            }

            // Slow-path masks with per-group scissor rects.
            if !masks.mask_instances_slow_with_scissor.is_empty() {
                self.shaders.borrow_mut().ps_mask().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

                self.device.enable_scissor();

                for (scissor_rect, instances) in &masks.mask_instances_slow_with_scissor {
                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));

                    self.draw_instanced_batch(
                        instances,
                        VertexArrayKind::Mask,
                        &BatchTextures::empty(),
                        stats,
                    );
                }

                self.device.disable_scissor();
            }
        }
    }
   2624 
   2625    fn handle_blits(
   2626        &mut self,
   2627        blits: &[BlitJob],
   2628        render_tasks: &RenderTaskGraph,
   2629        draw_target: DrawTarget,
   2630    ) {
   2631        if blits.is_empty() {
   2632            return;
   2633        }
   2634 
   2635        let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLIT);
   2636 
   2637        // TODO(gw): For now, we don't bother batching these by source texture.
   2638        //           If if ever shows up as an issue, we can easily batch them.
   2639        for blit in blits {
   2640            let (source, source_rect) = {
   2641                // A blit from the child render task into this target.
   2642                // TODO(gw): Support R8 format here once we start
   2643                //           creating mips for alpha masks.
   2644                let task = &render_tasks[blit.source];
   2645                let source_rect = blit.source_rect.translate(task.get_target_rect().min.to_vector());
   2646                let source_texture = task.get_texture_source();
   2647 
   2648                (source_texture, source_rect)
   2649            };
   2650 
   2651            let (texture, swizzle) = self.texture_resolver
   2652                .resolve(&source)
   2653                .expect("BUG: invalid source texture");
   2654 
   2655            if swizzle != Swizzle::default() {
   2656                error!("Swizzle {:?} can't be handled by a blit", swizzle);
   2657            }
   2658 
   2659            let read_target = DrawTarget::from_texture(
   2660                texture,
   2661                false,
   2662            );
   2663 
   2664            self.device.blit_render_target(
   2665                read_target.into(),
   2666                read_target.to_framebuffer_rect(source_rect),
   2667                draw_target,
   2668                draw_target.to_framebuffer_rect(blit.target_rect),
   2669                TextureFilter::Linear,
   2670            );
   2671        }
   2672    }
   2673 
    /// Draws scaling instances, grouped by source texture.
    ///
    /// For external textures the per-instance source rects are rewritten at
    /// draw time: external surfaces are only resolved via the lock() callback
    /// at the start of the frame render, so their UV rects are unknown when
    /// the instances are built.
    fn handle_scaling(
        &mut self,
        scalings: &FastHashMap<TextureSource, FrameVec<ScalingInstance>>,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        if scalings.is_empty() {
            return
        }

        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SCALE);
        for (source, instances) in scalings {
            let buffer_kind = source.image_buffer_kind();

            // When the source texture is an external texture, the UV rect is not known
            // when the external surface descriptor is created, because external textures
            // are not resolved until the lock() callback is invoked at the start of the
            // frame render. We must therefore override the source rects now.
            //
            // `uv_override_instances` is declared in the outer scope so that
            // the slice borrowed from it lives long enough for the draw below.
            let uv_override_instances;
            let instances = match source {
                TextureSource::External(..) => {
                    uv_override_instances = instances.iter().map(|instance| {
                        let mut new_instance = instance.clone();
                        let texel_rect: TexelRect = self.texture_resolver.get_uv_rect(
                            &source,
                            instance.source_rect.cast().into()
                        ).into();
                        new_instance.source_rect = DeviceRect::new(texel_rect.uv0, texel_rect.uv1);
                        new_instance
                    }).collect::<Vec<_>>();
                    uv_override_instances.as_slice()
                }
                _ => instances.as_slice()
            };

            self.shaders
                .borrow_mut()
                .get_scale_shader(buffer_kind)
                .bind(
                    &mut self.device,
                    &projection,
                    Some(self.texture_resolver.get_texture_size(source).to_f32()),
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

            self.draw_instanced_batch(
                instances,
                VertexArrayKind::Scale,
                &BatchTextures::composite_rgb(*source),
                stats,
            );
        }
    }
   2729 
   2730    fn handle_svg_nodes(
   2731        &mut self,
   2732        textures: &BatchTextures,
   2733        svg_filters: &[SVGFEFilterInstance],
   2734        projection: &default::Transform3D<f32>,
   2735        stats: &mut RendererStats,
   2736    ) {
   2737        if svg_filters.is_empty() {
   2738            return;
   2739        }
   2740 
   2741        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SVG_FILTER_NODES);
   2742 
   2743        self.shaders.borrow_mut().cs_svg_filter_node().bind(
   2744            &mut self.device,
   2745            &projection,
   2746            None,
   2747            &mut self.renderer_errors,
   2748            &mut self.profile,
   2749            &mut self.command_log,
   2750        );
   2751 
   2752        self.draw_instanced_batch(
   2753            &svg_filters,
   2754            VertexArrayKind::SvgFilterNode,
   2755            textures,
   2756            stats,
   2757        );
   2758    }
   2759 
    /// Resolve (copy) the contents of one or more source picture tasks into a
    /// destination picture task by blitting the intersection of their content
    /// rects.
    ///
    /// Both the sources and the destination must be `RenderTaskKind::Picture`
    /// tasks; anything else is a bug and panics. The intersection is computed
    /// in the parent surface's space (device space divided by each task's
    /// device-pixel scale), then mapped back into each task's own target rect
    /// to produce the blit source/dest rects.
    fn handle_resolve(
        &mut self,
        resolve_op: &ResolveOp,
        render_tasks: &RenderTaskGraph,
        draw_target: DrawTarget,
    ) {
        for src_task_id in &resolve_op.src_task_ids {
            let src_task = &render_tasks[*src_task_id];
            let src_info = match src_task.kind {
                RenderTaskKind::Picture(ref info) => info,
                _ => panic!("bug: not a picture"),
            };
            let src_task_rect = src_task.get_target_rect().to_f32();

            let dest_task = &render_tasks[resolve_op.dest_task_id];
            let dest_info = match dest_task.kind {
                RenderTaskKind::Picture(ref info) => info,
                _ => panic!("bug: not a picture"),
            };
            let dest_task_rect = dest_task.get_target_rect().to_f32();

            // If the dest picture is going to a blur target, it may have been
            // expanded in size so that the downsampling passes don't introduce
            // sampling error. In this case, we need to ensure we use the
            // content size rather than the render task size to work out
            // the intersecting rect to use for the resolve copy.
            let dest_task_rect = DeviceRect::from_origin_and_size(
                dest_task_rect.min,
                dest_info.content_size.to_f32(),
            );

            // Get the rect that we ideally want, in space of the parent surface
            let wanted_rect = DeviceRect::from_origin_and_size(
                dest_info.content_origin,
                dest_task_rect.size().to_f32(),
            ).cast_unit() * dest_info.device_pixel_scale.inverse();

            // Get the rect that is available on the parent surface. It may be smaller
            // than desired because this is a picture cache tile covering only part of
            // the wanted rect and/or because the parent surface was clipped.
            let avail_rect = DeviceRect::from_origin_and_size(
                src_info.content_origin,
                src_task_rect.size().to_f32(),
            ).cast_unit() * src_info.device_pixel_scale.inverse();

            // If there's no overlap between wanted and available rects there
            // is nothing to copy for this source task.
            if let Some(device_int_rect) = wanted_rect.intersection(&avail_rect) {
                // Map the shared intersection back into each task's own
                // device space (the two tasks may have different scales).
                let src_int_rect = (device_int_rect * src_info.device_pixel_scale).cast_unit();
                let dest_int_rect = (device_int_rect * dest_info.device_pixel_scale).cast_unit();

                // If there is a valid intersection, work out the correct origins and
                // sizes of the copy rects, and do the blit.

                // Offset from the task's content origin to the intersection,
                // re-based onto the task's target rect in the texture.
                let src_origin = src_task_rect.min.to_f32() +
                    src_int_rect.min.to_vector() -
                    src_info.content_origin.to_vector();

                let src = DeviceIntRect::from_origin_and_size(
                    src_origin.to_i32(),
                    src_int_rect.size().round().to_i32(),
                );

                let dest_origin = dest_task_rect.min.to_f32() +
                    dest_int_rect.min.to_vector() -
                    dest_info.content_origin.to_vector();

                let dest = DeviceIntRect::from_origin_and_size(
                    dest_origin.to_i32(),
                    dest_int_rect.size().round().to_i32(),
                );

                // Resolve the source task's target texture (no swizzle) so it
                // can be used as the blit read target.
                let texture_source = TextureSource::TextureCache(
                    src_task.get_target_texture(),
                    Swizzle::default(),
                );
                let (cache_texture, _) = self.texture_resolver
                    .resolve(&texture_source).expect("bug: no source texture");

                let read_target = ReadTarget::from_texture(cache_texture);

                // Should always be drawing to picture cache tiles or off-screen surface!
                debug_assert!(!draw_target.is_default());
                // 1:1 device-to-framebuffer scale; only used to change the
                // rects' unit type for `blit_render_target`.
                let device_to_framebuffer = Scale::new(1i32);

                self.device.blit_render_target(
                    read_target,
                    src * device_to_framebuffer,
                    draw_target,
                    dest * device_to_framebuffer,
                    TextureFilter::Linear,
                );
            }
        }
    }
   2853 
    /// Render a single picture cache target (tile).
    ///
    /// Sets up the draw target (including QCOM tiled-rendering bracketing
    /// where supported), clears the dirty region — either with a scissored
    /// clear or, on devices that prefer it, with a full-screen clear quad —
    /// then either draws the tile's alpha batch container or blits it from an
    /// existing render task, depending on `target.kind`. The depth target is
    /// invalidated at the end since its contents are not needed after this
    /// tile is done.
    fn draw_picture_cache_target(
        &mut self,
        target: &PictureCacheTarget,
        draw_target: DrawTarget,
        projection: &default::Transform3D<f32>,
        render_tasks: &RenderTaskGraph,
        stats: &mut RendererStats,
    ) {
        profile_scope!("draw_picture_cache_target");
        if let Some(history) = &mut self.command_log {
            history.begin_render_target("Picture tile", draw_target.dimensions());
        }

        self.profile.inc(profiler::RENDERED_PICTURE_TILES);
        let _gm = self.gpu_profiler.start_marker("picture cache target");
        let framebuffer_kind = FramebufferKind::Other;

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);
            self.device.bind_draw_target(draw_target);

            // On Adreno GPUs that support it, restrict tiled rendering to the
            // dirty rect of this tile for this pass; closed with
            // end_tiling_qcom below.
            if self.device.get_capabilities().supports_qcom_tiled_rendering {
                self.device.gl().start_tiling_qcom(
                    target.dirty_rect.min.x.max(0) as _,
                    target.dirty_rect.min.y.max(0) as _,
                    target.dirty_rect.width() as _,
                    target.dirty_rect.height() as _,
                    0,
                );
            }

            self.device.enable_depth_write();
            self.set_blend(false, framebuffer_kind);

            let clear_color = target.clear_color.map(|c| c.to_array());
            // Only scissor the clear when the device supports partial target
            // updates AND either the dirty region is smaller than the valid
            // region or the device prefers scissored clears.
            let scissor_rect = if self.device.get_capabilities().supports_render_target_partial_update
                && (target.dirty_rect != target.valid_rect
                    || self.device.get_capabilities().prefers_clear_scissor)
            {
                Some(target.dirty_rect)
            } else {
                None
            };
            match scissor_rect {
                // If updating only a dirty rect within a picture cache target, the
                // clear must also be scissored to that dirty region.
                // On devices where clear_caches_with_quads is set, the clear
                // is emulated by drawing a quad (depth test Always so it
                // always writes depth 1.0).
                Some(r) if self.clear_caches_with_quads => {
                    self.device.enable_depth(DepthFunction::Always);
                    // Save the draw call count so that our reftests don't get confused...
                    let old_draw_call_count = stats.total_draw_calls;
                    // No clear color means depth-only clear: mask out color
                    // writes for the quad.
                    if clear_color.is_none() {
                        self.device.disable_color_write();
                    }
                    let instance = ClearInstance {
                        rect: [
                            r.min.x as f32, r.min.y as f32,
                            r.max.x as f32, r.max.y as f32,
                        ],
                        color: clear_color.unwrap_or([0.0; 4]),
                    };
                    self.shaders.borrow_mut().ps_clear().bind(
                        &mut self.device,
                        &projection,
                        None,
                        &mut self.renderer_errors,
                        &mut self.profile,
                        &mut self.command_log,
                    );
                    self.draw_instanced_batch(
                        &[instance],
                        VertexArrayKind::Clear,
                        &BatchTextures::empty(),
                        stats,
                    );
                    if clear_color.is_none() {
                        self.device.enable_color_write();
                    }
                    stats.total_draw_calls = old_draw_call_count;
                    self.device.disable_depth();
                }
                // Otherwise perform a regular (possibly scissored) clear of
                // color and depth (depth cleared to 1.0).
                other => {
                    let scissor_rect = other.map(|rect| {
                        draw_target.build_scissor_rect(Some(rect))
                    });
                    self.device.clear_target(clear_color, Some(1.0), scissor_rect);
                }
            };
            self.device.disable_depth_write();
        }

        match target.kind {
            // Normal path: rasterize the tile's batches into the target.
            PictureCacheTargetKind::Draw { ref alpha_batch_container } => {
                self.draw_alpha_batch_container(
                    alpha_batch_container,
                    draw_target,
                    framebuffer_kind,
                    projection,
                    render_tasks,
                    stats,
                );
            }
            // Blit path: copy the tile contents from an already-rendered task.
            PictureCacheTargetKind::Blit { task_id, sub_rect_offset } => {
                let src_task = &render_tasks[task_id];
                let (texture, _swizzle) = self.texture_resolver
                    .resolve(&src_task.get_texture_source())
                    .expect("BUG: invalid source texture");

                let src_task_rect = src_task.get_target_rect();

                // Source rect: the task rect offset by the sub-rect offset,
                // sized to match this tile's dirty rect.
                let p0 = src_task_rect.min + sub_rect_offset;
                let p1 = p0 + target.dirty_rect.size();
                let src_rect = DeviceIntRect::new(p0, p1);

                // TODO(gw): In future, it'd be tidier to have the draw target offset
                //           for DC surfaces handled by `blit_render_target`. However,
                //           for now they are only ever written to here.
                let target_rect = target
                    .dirty_rect
                    .translate(draw_target.offset().to_vector())
                    .cast_unit();

                self.device.blit_render_target(
                    ReadTarget::from_texture(texture),
                    src_rect.cast_unit(),
                    draw_target,
                    target_rect,
                    TextureFilter::Nearest,
                );
            }
        }

        // Depth contents are not needed past this point; let drivers discard.
        self.device.invalidate_depth_target();
        if self.device.get_capabilities().supports_qcom_tiled_rendering {
            self.device.gl().end_tiling_qcom(gl::COLOR_BUFFER_BIT0_QCOM);
        }
    }
   2990 
    /// Draw an alpha batch container into a given draw target. This is used
    /// by both color and picture cache target kinds.
    ///
    /// Runs two passes: opaque batches are drawn front-to-back with depth
    /// write enabled (for z-buffer efficiency), then alpha batches are drawn
    /// with blending, switching the device blend mode only when a batch's
    /// blend mode differs from the previous one. If the container carries a
    /// task scissor rect, scissoring is enabled for the whole duration and
    /// disabled at the end. Batches can be skipped via debug flags
    /// (DISABLE_OPAQUE_PASS / DISABLE_ALPHA_PASS / per-batch-kind skips).
    fn draw_alpha_batch_container(
        &mut self,
        alpha_batch_container: &AlphaBatchContainer,
        draw_target: DrawTarget,
        framebuffer_kind: FramebufferKind,
        projection: &default::Transform3D<f32>,
        render_tasks: &RenderTaskGraph,
        stats: &mut RendererStats,
    ) {
        let uses_scissor = alpha_batch_container.task_scissor_rect.is_some();

        if uses_scissor {
            self.device.enable_scissor();
            let scissor_rect = draw_target.build_scissor_rect(
                alpha_batch_container.task_scissor_rect,
            );
            self.device.set_scissor_rect(scissor_rect)
        }

        if !alpha_batch_container.opaque_batches.is_empty()
            && !self.debug_flags.contains(DebugFlags::DISABLE_OPAQUE_PASS) {
            let _gl = self.gpu_profiler.start_marker("opaque batches");
            let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
            self.set_blend(false, framebuffer_kind);
            //Note: depth equality is needed for split planes
            self.device.enable_depth(DepthFunction::LessEqual);
            self.device.enable_depth_write();

            // Draw opaque batches front-to-back for maximum
            // z-buffer efficiency!
            for batch in alpha_batch_container
                .opaque_batches
                .iter()
                .rev()
                {
                    if should_skip_batch(&batch.key.kind, self.debug_flags) {
                        continue;
                    }

                    self.shaders.borrow_mut()
                        .get(&batch.key, batch.features, self.debug_flags, &self.device)
                        .bind(
                            &mut self.device, projection, None,
                            &mut self.renderer_errors,
                            &mut self.profile,
                            &mut self.command_log,
                        );

                    let _timer = self.gpu_profiler.start_timer(batch.key.kind.sampler_tag());
                    self.draw_instanced_batch(
                        &batch.instances,
                        VertexArrayKind::Primitive,
                        &batch.key.textures,
                        stats
                    );
                }

            self.device.disable_depth_write();
            self.gpu_profiler.finish_sampler(opaque_sampler);
        } else {
            self.device.disable_depth();
        }

        if !alpha_batch_container.alpha_batches.is_empty()
            && !self.debug_flags.contains(DebugFlags::DISABLE_ALPHA_PASS) {
            let _gl = self.gpu_profiler.start_marker("alpha batches");
            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
            self.set_blend(true, framebuffer_kind);

            // Track the active blend mode so the device state is only
            // changed when a batch actually needs a different mode.
            let mut prev_blend_mode = BlendMode::None;
            // Clone the Rc so shaders can be borrowed mutably inside the
            // loop while `self` is still usable for the draw calls.
            let shaders_rc = self.shaders.clone();

            for batch in &alpha_batch_container.alpha_batches {
                if should_skip_batch(&batch.key.kind, self.debug_flags) {
                    continue;
                }

                let mut shaders = shaders_rc.borrow_mut();
                let shader = shaders.get(
                    &batch.key,
                    batch.features | BatchFeatures::ALPHA_PASS,
                    self.debug_flags,
                    &self.device,
                );

                if batch.key.blend_mode != prev_blend_mode {
                    match batch.key.blend_mode {
                        // Overdraw debugging overrides every blend mode on
                        // the main framebuffer.
                        _ if self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) &&
                            framebuffer_kind == FramebufferKind::Main => {
                            self.device.set_blend_mode_show_overdraw();
                        }
                        BlendMode::None => {
                            unreachable!("bug: opaque blend in alpha pass");
                        }
                        BlendMode::Alpha => {
                            self.device.set_blend_mode_alpha();
                        }
                        BlendMode::PremultipliedAlpha => {
                            self.device.set_blend_mode_premultiplied_alpha();
                        }
                        BlendMode::PremultipliedDestOut => {
                            self.device.set_blend_mode_premultiplied_dest_out();
                        }
                        BlendMode::SubpixelDualSource => {
                            self.device.set_blend_mode_subpixel_dual_source();
                        }
                        BlendMode::Advanced(mode) => {
                            if self.enable_advanced_blend_barriers {
                                self.device.gl().blend_barrier_khr();
                            }
                            self.device.set_blend_mode_advanced(mode);
                        }
                        BlendMode::MultiplyDualSource => {
                            self.device.set_blend_mode_multiply_dual_source();
                        }
                        BlendMode::Screen => {
                            self.device.set_blend_mode_screen();
                        }
                        BlendMode::Exclusion => {
                            self.device.set_blend_mode_exclusion();
                        }
                        BlendMode::PlusLighter => {
                            self.device.set_blend_mode_plus_lighter();
                        }
                    }
                    prev_blend_mode = batch.key.blend_mode;
                }

                // Handle special case readback for composites.
                if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, backdrop_id }) = batch.key.kind {
                    // composites can't be grouped together because
                    // they may overlap and affect each other.
                    debug_assert_eq!(batch.instances.len(), 1);
                    self.handle_readback_composite(
                        draw_target,
                        uses_scissor,
                        &render_tasks[task_id],
                        &render_tasks[backdrop_id],
                    );
                }

                let _timer = self.gpu_profiler.start_timer(batch.key.kind.sampler_tag());
                shader.bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                    &mut self.command_log,
                );

                self.draw_instanced_batch(
                    &batch.instances,
                    VertexArrayKind::Primitive,
                    &batch.key.textures,
                    stats
                );
            }

            self.set_blend(false, framebuffer_kind);
            self.gpu_profiler.finish_sampler(transparent_sampler);
        }

        // Restore device state for subsequent targets.
        self.device.disable_depth();
        if uses_scissor {
            self.device.disable_scissor();
        }
    }
   3161 
    /// Rasterize any external compositor surfaces that require updating
    ///
    /// For each surface carrying `update_params`, binds the corresponding
    /// native compositor surface (tile 0,0, full surface rect), draws a
    /// single composite instance covering the whole surface — YUV or RGB
    /// depending on the surface's resolved color data — and unbinds. External
    /// texture UV rects are queried from the texture resolver because they
    /// are only known after the frame-start lock() callback.
    fn update_external_native_surfaces(
        &mut self,
        external_surfaces: &[ResolvedExternalSurface],
        results: &mut RenderResults,
    ) {
        if external_surfaces.is_empty() {
            return;
        }

        let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);

        // Full-surface opaque copy: no depth test, no blending.
        self.device.disable_depth();
        self.set_blend(false, FramebufferKind::Main);

        for surface in external_surfaces {
            // See if this surface needs to be updated
            let (native_surface_id, surface_size) = match surface.update_params {
                Some(params) => params,
                None => continue,
            };

            // When updating an external surface, the entire surface rect is used
            // for all of the draw, dirty, valid and clip rect parameters.
            let surface_rect = surface_size.into();

            // Bind the native compositor surface to update
            let surface_info = self.compositor_config
                .compositor()
                .unwrap()
                .bind(
                    &mut self.device,
                    NativeTileId {
                        surface_id: native_surface_id,
                        x: 0,
                        y: 0,
                    },
                    surface_rect,
                    surface_rect,
                );

            // Bind the native surface to current FBO target
            let draw_target = DrawTarget::NativeSurface {
                offset: surface_info.origin,
                external_fbo_id: surface_info.fbo_id,
                dimensions: surface_size,
            };
            self.device.bind_draw_target(draw_target);

            let projection = Transform3D::ortho(
                0.0,
                surface_size.width as f32,
                0.0,
                surface_size.height as f32,
                self.device.ortho_near_plane(),
                self.device.ortho_far_plane(),
            );

            // Build the composite instance and bind the matching shader for
            // the surface's color data (YUV planes or a single RGB plane).
            let ( textures, instance ) = match surface.color_data {
                ResolvedExternalSurfaceColorData::Yuv{
                        ref planes, color_space, format, channel_bit_depth, .. } => {

                    let textures = BatchTextures::composite_yuv(
                        planes[0].texture,
                        planes[1].texture,
                        planes[2].texture,
                    );

                    // When the texture is an external texture, the UV rect is not known when
                    // the external surface descriptor is created, because external textures
                    // are not resolved until the lock() callback is invoked at the start of
                    // the frame render. To handle this, query the texture resolver for the
                    // UV rect if it's an external texture, otherwise use the default UV rect.
                    let uv_rects = [
                        self.texture_resolver.get_uv_rect(&textures.input.colors[0], planes[0].uv_rect),
                        self.texture_resolver.get_uv_rect(&textures.input.colors[1], planes[1].uv_rect),
                        self.texture_resolver.get_uv_rect(&textures.input.colors[2], planes[2].uv_rect),
                    ];

                    let instance = CompositeInstance::new_yuv(
                        surface_rect.to_f32(),
                        surface_rect.to_f32(),
                        // z-id is not relevant when updating a native compositor surface.
                        // TODO(gw): Support compositor surfaces without z-buffer, for memory / perf win here.
                        color_space,
                        format,
                        channel_bit_depth,
                        uv_rects,
                        (false, false),
                        None,
                    );

                    // Bind an appropriate YUV shader for the texture format kind
                    self.shaders
                        .borrow_mut()
                        .get_composite_shader(
                            CompositeSurfaceFormat::Yuv,
                            surface.image_buffer_kind,
                            instance.get_yuv_features(),
                        ).bind(
                            &mut self.device,
                            &projection,
                            None,
                            &mut self.renderer_errors,
                            &mut self.profile,
                            &mut self.command_log,
                        );

                    ( textures, instance )
                },
                ResolvedExternalSurfaceColorData::Rgb{ ref plane, .. } => {
                    let textures = BatchTextures::composite_rgb(plane.texture);
                    // Same external-texture UV resolution as the YUV path,
                    // but for the single RGB plane.
                    let uv_rect = self.texture_resolver.get_uv_rect(&textures.input.colors[0], plane.uv_rect);
                    let instance = CompositeInstance::new_rgb(
                        surface_rect.to_f32(),
                        surface_rect.to_f32(),
                        PremultipliedColorF::WHITE,
                        uv_rect,
                        plane.texture.uses_normalized_uvs(),
                        (false, false),
                        None,
                    );
                    let features = instance.get_rgb_features();

                    self.shaders
                        .borrow_mut()
                        .get_composite_shader(
                            CompositeSurfaceFormat::Rgba,
                            surface.image_buffer_kind,
                            features,
                        ).bind(
                            &mut self.device,
                            &projection,
                            None,
                            &mut self.renderer_errors,
                            &mut self.profile,
                            &mut self.command_log,
                        );

                    ( textures, instance )
                },
            };

            self.draw_instanced_batch(
                &[instance],
                VertexArrayKind::Composite,
                &textures,
                &mut results.stats,
            );

            // Release the native surface back to the compositor.
            self.compositor_config
                .compositor()
                .unwrap()
                .unbind(&mut self.device);
        }

        self.gpu_profiler.finish_sampler(opaque_sampler);
    }
   3320 
    /// Draw a list of tiles to the framebuffer
    ///
    /// Tiles are drawn as instanced quads. Consecutive tiles that share the
    /// same composite shader parameters and input textures are accumulated
    /// into a single batch; whenever either changes, the pending batch is
    /// flushed and, if required, a different composite shader is bound.
    ///
    /// * `tiles_iter` - occlusion items referencing entries in
    ///   `composite_state.tiles`, in the order they should be drawn.
    /// * `composite_state` - per-frame compositing state (tiles, device
    ///   transforms, compositor clips).
    /// * `external_surfaces` - resolved external surfaces, indexed by
    ///   `ExternalSurface` tiles.
    /// * `projection` - projection transform for the current draw target.
    /// * `stats` - accumulates renderer statistics for this frame.
    fn draw_tile_list<'a, I: Iterator<Item = &'a occlusion::Item<OcclusionItemKey>>>(
        &mut self,
        tiles_iter: I,
        composite_state: &CompositeState,
        external_surfaces: &[ResolvedExternalSurface],
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        // Shader parameters of the batch currently being built:
        // (surface format, image buffer kind, features, optional texture size).
        let mut current_shader_params = (
            CompositeSurfaceFormat::Rgba,
            ImageBufferKind::Texture2D,
            CompositeFeatures::empty(),
            None,
        );
        let mut current_textures = BatchTextures::empty();
        // Instances accumulated for the current batch.
        let mut instances = Vec::new();

        // Bind the default composite shader up front; it is re-bound in the
        // loop below whenever a tile needs different shader parameters.
        self.shaders
            .borrow_mut()
            .get_composite_shader(
                current_shader_params.0,
                current_shader_params.1,
                current_shader_params.2,
            ).bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors,
                &mut self.profile,
                &mut self.command_log,
            );

        for item in tiles_iter {
            let tile = &composite_state.tiles[item.key.tile_index];

            // The occlusion-culled rect for this item, in device space.
            let clip_rect = item.rectangle;
            let tile_rect = composite_state.get_device_rect(&tile.local_rect, tile.transform_index);
            let transform = composite_state.get_device_transform(tile.transform_index);
            // A negative scale on an axis means the tile is mirrored on that axis.
            let flip = (transform.scale.x < 0.0, transform.scale.y < 0.0);

            // Only resolve the compositor clip when this segment needs a mask.
            let clip = if item.key.needs_mask {
                tile.clip_index.map(|index| {
                    composite_state.get_compositor_clip(index)
                })
            } else {
                None
            };

            // Work out the draw params based on the tile surface
            let (instance, textures, shader_params) = match tile.surface {
                // Solid color tile: drawn through the RGBA path with a dummy
                // texture source.
                CompositeTileSurface::Color { color } => {
                    let dummy = TextureSource::Dummy;
                    let image_buffer_kind = dummy.image_buffer_kind();
                    let instance = CompositeInstance::new(
                        tile_rect,
                        clip_rect,
                        color.premultiplied(),
                        flip,
                        clip,
                    );
                    let features = instance.get_rgb_features();
                    (
                        instance,
                        BatchTextures::composite_rgb(dummy),
                        (CompositeSurfaceFormat::Rgba, image_buffer_kind, features, None),
                    )
                }
                // Picture cache tile whose content lives in the texture cache.
                CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::TextureCache { texture } } => {
                    let instance = CompositeInstance::new(
                        tile_rect,
                        clip_rect,
                        PremultipliedColorF::WHITE,
                        flip,
                        clip,
                    );
                    let features = instance.get_rgb_features();
                    (
                        instance,
                        BatchTextures::composite_rgb(texture),
                        (
                            CompositeSurfaceFormat::Rgba,
                            ImageBufferKind::Texture2D,
                            features,
                            None,
                        ),
                    )
                }
                // External surface (e.g. video), either YUV planes or a
                // single RGB plane.
                CompositeTileSurface::ExternalSurface { external_surface_index } => {
                    let surface = &external_surfaces[external_surface_index.0];

                    match surface.color_data {
                        ResolvedExternalSurfaceColorData::Yuv{ ref planes, color_space, format, channel_bit_depth, .. } => {
                            let textures = BatchTextures::composite_yuv(
                                planes[0].texture,
                                planes[1].texture,
                                planes[2].texture,
                            );

                            // When the texture is an external texture, the UV rect is not known when
                            // the external surface descriptor is created, because external textures
                            // are not resolved until the lock() callback is invoked at the start of
                            // the frame render. To handle this, query the texture resolver for the
                            // UV rect if it's an external texture, otherwise use the default UV rect.
                            let uv_rects = [
                                self.texture_resolver.get_uv_rect(&textures.input.colors[0], planes[0].uv_rect),
                                self.texture_resolver.get_uv_rect(&textures.input.colors[1], planes[1].uv_rect),
                                self.texture_resolver.get_uv_rect(&textures.input.colors[2], planes[2].uv_rect),
                            ];

                            let instance = CompositeInstance::new_yuv(
                                tile_rect,
                                clip_rect,
                                color_space,
                                format,
                                channel_bit_depth,
                                uv_rects,
                                flip,
                                clip,
                            );
                            let features = instance.get_yuv_features();

                            (
                                instance,
                                textures,
                                (
                                    CompositeSurfaceFormat::Yuv,
                                    surface.image_buffer_kind,
                                    features,
                                    None
                                ),
                            )
                        },
                        ResolvedExternalSurfaceColorData::Rgb { ref plane, .. } => {
                            // As above: resolve the UV rect now in case the
                            // plane is an external texture.
                            let uv_rect = self.texture_resolver.get_uv_rect(&plane.texture, plane.uv_rect);
                            let instance = CompositeInstance::new_rgb(
                                tile_rect,
                                clip_rect,
                                PremultipliedColorF::WHITE,
                                uv_rect,
                                plane.texture.uses_normalized_uvs(),
                                flip,
                                clip,
                            );
                            let features = instance.get_rgb_features();
                            (
                                instance,
                                BatchTextures::composite_rgb(plane.texture),
                                (
                                    CompositeSurfaceFormat::Rgba,
                                    surface.image_buffer_kind,
                                    features,
                                    // RGB external surfaces also need the texture
                                    // size passed to the shader.
                                    Some(self.texture_resolver.get_texture_size(&plane.texture).to_f32()),
                                ),
                            )
                        },
                    }
                }
                // Native surfaces are composited by the OS compositor and must
                // never reach this (simple/software) composite path.
                CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::Native { .. } } => {
                    unreachable!("bug: found native surface in simple composite path");
                }
            };

            // Flush batch if shader params or textures changed
            let flush_batch = !current_textures.is_compatible_with(&textures) ||
                shader_params != current_shader_params;

            if flush_batch {
                if !instances.is_empty() {
                    self.draw_instanced_batch(
                        &instances,
                        VertexArrayKind::Composite,
                        &current_textures,
                        stats,
                    );
                    instances.clear();
                }
            }

            // Re-bind the composite shader if the params changed. This must
            // happen after the flush above, so the pending batch is drawn
            // with the shader it was built for.
            if shader_params != current_shader_params {
                self.shaders
                    .borrow_mut()
                    .get_composite_shader(shader_params.0, shader_params.1, shader_params.2)
                    .bind(
                        &mut self.device,
                        projection,
                        shader_params.3,
                        &mut self.renderer_errors,
                        &mut self.profile,
                        &mut self.command_log,
                    );

                current_shader_params = shader_params;
            }

            current_textures = textures;

            // Add instance to current batch
            instances.push(instance);
        }

        // Flush the last batch
        if !instances.is_empty() {
            self.draw_instanced_batch(
                &instances,
                VertexArrayKind::Composite,
                &current_textures,
                stats,
            );
        }
    }
   3532 
   3533    // Composite tiles in a swapchain. When using LayerCompositor, we may
   3534    // split the compositing in to multiple swapchains.
   3535    fn composite_pass(
   3536        &mut self,
   3537        composite_state: &CompositeState,
   3538        draw_target: DrawTarget,
   3539        clear_color: ColorF,
   3540        projection: &default::Transform3D<f32>,
   3541        results: &mut RenderResults,
   3542        partial_present_mode: Option<PartialPresentMode>,
   3543        layer: &SwapChainLayer,
   3544    ) {
   3545        self.device.bind_draw_target(draw_target);
   3546        self.device.disable_depth_write();
   3547        self.device.disable_depth();
   3548 
   3549        // If using KHR_partial_update, call eglSetDamageRegion.
   3550        // This must be called exactly once per frame, and prior to any rendering to the main
   3551        // framebuffer. Additionally, on Mali-G77 we encountered rendering issues when calling
   3552        // this earlier in the frame, during offscreen render passes. So call it now, immediately
   3553        // before rendering to the main framebuffer. See bug 1685276 for details.
   3554        if let Some(partial_present) = self.compositor_config.partial_present() {
   3555            if let Some(PartialPresentMode::Single { dirty_rect }) = partial_present_mode {
   3556                partial_present.set_buffer_damage_region(&[dirty_rect.to_i32()]);
   3557            }
   3558        }
   3559 
   3560        // Clear the framebuffer
   3561        let clear_color = Some(clear_color.to_array());
   3562 
   3563        match partial_present_mode {
   3564            Some(PartialPresentMode::Single { dirty_rect }) => {
   3565                // There is no need to clear if the dirty rect is occluded. Additionally,
   3566                // on Mali-G77 we have observed artefacts when calling glClear (even with
   3567                // the empty scissor rect set) after calling eglSetDamageRegion with an
   3568                // empty damage region. So avoid clearing in that case. See bug 1709548.
   3569                if !dirty_rect.is_empty() && layer.occlusion.test(&dirty_rect) {
   3570                    // We have a single dirty rect, so clear only that
   3571                    self.device.clear_target(clear_color,
   3572                                             None,
   3573                                             Some(draw_target.to_framebuffer_rect(dirty_rect.to_i32())));
   3574                }
   3575            }
   3576            None => {
   3577                // Partial present is disabled, so clear the entire framebuffer
   3578                self.device.clear_target(clear_color,
   3579                                         None,
   3580                                         None);
   3581            }
   3582        }
   3583 
   3584        // Draw opaque tiles
   3585        let opaque_items = layer.occlusion.opaque_items();
   3586        if !opaque_items.is_empty() {
   3587            let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
   3588            self.set_blend(false, FramebufferKind::Main);
   3589            self.draw_tile_list(
   3590                opaque_items.iter(),
   3591                &composite_state,
   3592                &composite_state.external_surfaces,
   3593                projection,
   3594                &mut results.stats,
   3595            );
   3596            self.gpu_profiler.finish_sampler(opaque_sampler);
   3597        }
   3598 
   3599        // Draw alpha tiles
   3600        let alpha_items = layer.occlusion.alpha_items();
   3601        if !alpha_items.is_empty() {
   3602            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
   3603            self.set_blend(true, FramebufferKind::Main);
   3604            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Main);
   3605            self.draw_tile_list(
   3606                alpha_items.iter().rev(),
   3607                &composite_state,
   3608                &composite_state.external_surfaces,
   3609                projection,
   3610                &mut results.stats,
   3611            );
   3612            self.gpu_profiler.finish_sampler(transparent_sampler);
   3613        }
   3614    }
   3615 
   3616    /// Composite picture cache tiles into the framebuffer. This is currently
   3617    /// the only way that picture cache tiles get drawn. In future, the tiles
   3618    /// will often be handed to the OS compositor, and this method will be
   3619    /// rarely used.
   3620    fn composite_simple(
   3621        &mut self,
   3622        composite_state: &CompositeState,
   3623        frame_device_size: DeviceIntSize,
   3624        fb_draw_target: DrawTarget,
   3625        projection: &default::Transform3D<f32>,
   3626        results: &mut RenderResults,
   3627        partial_present_mode: Option<PartialPresentMode>,
   3628        device_size: DeviceIntSize,
   3629    ) {
   3630        let _gm = self.gpu_profiler.start_marker("framebuffer");
   3631        let _timer = self.gpu_profiler.start_timer(GPU_TAG_COMPOSITE);
   3632 
   3633        let num_tiles = composite_state.tiles.len();
   3634        self.profile.set(profiler::PICTURE_TILES, num_tiles);
   3635 
   3636        let (window_is_opaque, enable_screenshot)  = match self.compositor_config.layer_compositor() {
   3637            Some(ref compositor) => {
   3638                let props = compositor.get_window_properties();
   3639                (props.is_opaque, props.enable_screenshot)
   3640            }
   3641            None => (true, true)
   3642        };
   3643 
   3644        let mut input_layers: Vec<CompositorInputLayer> = Vec::new();
   3645        let mut swapchain_layers = Vec::new();
   3646        let cap = composite_state.tiles.len();
   3647        let mut segment_builder = SegmentBuilder::new();
   3648        let mut tile_index_to_layer_index = vec![None; composite_state.tiles.len()];
   3649        let mut full_render_occlusion = occlusion::FrontToBackBuilder::with_capacity(cap, cap);
   3650        let mut layer_compositor_frame_state = LayerCompositorFrameState{
   3651            tile_states: FastHashMap::default(),
   3652            rects_without_id: Vec::new(),
   3653        };
   3654 
   3655        // Calculate layers with full device rect
   3656 
   3657        // Add a debug overlay request if enabled
   3658        if self.debug_overlay_state.is_enabled {
   3659            self.debug_overlay_state.layer_index = input_layers.len();
   3660 
   3661            input_layers.push(CompositorInputLayer {
   3662                usage: CompositorSurfaceUsage::DebugOverlay,
   3663                is_opaque: false,
   3664                offset: DeviceIntPoint::zero(),
   3665                clip_rect: device_size.into(),
   3666                rounded_clip_rect: device_size.into(),
   3667                rounded_clip_radii: ClipRadius::EMPTY,                
   3668            });
   3669 
   3670            swapchain_layers.push(SwapChainLayer {
   3671                occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
   3672            });
   3673        }
   3674 
   3675        // NOTE: Tiles here are being iterated in front-to-back order by
   3676        //       z-id, due to the sort in composite_state.end_frame()
   3677        for (idx, tile) in composite_state.tiles.iter().enumerate() {
   3678            let device_tile_box = composite_state.get_device_rect(
   3679                &tile.local_rect,
   3680                tile.transform_index
   3681            );
   3682 
   3683            if let Some(ref _compositor) = self.compositor_config.layer_compositor() {
   3684                match tile.tile_id {
   3685                    Some(tile_id) => {
   3686                        layer_compositor_frame_state.
   3687                            tile_states
   3688                            .insert(
   3689                            tile_id,
   3690                            CompositeTileState {
   3691                                local_rect: tile.local_rect,
   3692                                local_valid_rect: tile.local_valid_rect,
   3693                                device_clip_rect: tile.device_clip_rect,
   3694                                z_id: tile.z_id,
   3695                                device_tile_box: device_tile_box,
   3696                                visible_rects: Vec::new(),
   3697                            },
   3698                        );
   3699                    }
   3700                    None => {}
   3701                }
   3702            }
   3703 
   3704            // Simple compositor needs the valid rect in device space to match clip rect
   3705            let device_valid_rect = composite_state
   3706                .get_device_rect(&tile.local_valid_rect, tile.transform_index);
   3707 
   3708            let rect = device_tile_box
   3709                .intersection_unchecked(&tile.device_clip_rect)
   3710                .intersection_unchecked(&device_valid_rect);
   3711 
   3712            if rect.is_empty() {
   3713                continue;
   3714            }
   3715 
   3716            // Determine if the tile is an external surface or content
   3717            let usage = match tile.surface {
   3718                CompositeTileSurface::Texture { .. } |
   3719                CompositeTileSurface::Color { .. } => {
   3720                    CompositorSurfaceUsage::Content
   3721                }
   3722                CompositeTileSurface::ExternalSurface { external_surface_index } => {
   3723                    match (self.current_compositor_kind, enable_screenshot) {
   3724                        (CompositorKind::Native { .. }, _) | (CompositorKind::Draw { .. }, _) => {
   3725                            CompositorSurfaceUsage::Content
   3726                        }
   3727                        (CompositorKind::Layer { .. }, true) => {
   3728                            CompositorSurfaceUsage::Content
   3729                        }
   3730                        (CompositorKind::Layer { .. }, false) => {
   3731                            let surface = &composite_state.external_surfaces[external_surface_index.0];
   3732 
   3733                            // TODO(gwc): For now, we only select a hardware overlay swapchain if we
   3734                            // have an external image, but it may make sense to do for compositor
   3735                            // surfaces without in future.
   3736                            match surface.external_image_id {
   3737                                Some(external_image_id) => {
   3738                                    let image_key = match surface.color_data {
   3739                                        ResolvedExternalSurfaceColorData::Rgb { image_dependency, .. } => image_dependency.key,
   3740                                        ResolvedExternalSurfaceColorData::Yuv { image_dependencies, .. } => image_dependencies[0].key,
   3741                                    };
   3742 
   3743                                    CompositorSurfaceUsage::External {
   3744                                        image_key,
   3745                                        external_image_id,
   3746                                        transform_index: tile.transform_index,
   3747                                    }
   3748                                }
   3749                                None => {
   3750                                    CompositorSurfaceUsage::Content
   3751                                }
   3752                            }
   3753                        }
   3754                    }
   3755                }
   3756            };
   3757 
   3758            if let Some(ref _compositor) = self.compositor_config.layer_compositor() {
   3759                if let CompositeTileSurface::ExternalSurface { .. } = tile.surface {
   3760                    assert!(tile.tile_id.is_none());
   3761                    // ExternalSurface is not promoted to external composite.
   3762                    if let CompositorSurfaceUsage::Content = usage {
   3763                        layer_compositor_frame_state.rects_without_id.push(rect);
   3764                    }
   3765                } else {
   3766                    assert!(tile.tile_id.is_some());
   3767                }
   3768            }
   3769 
   3770            // Determine whether we need a new layer, and if so, what kind
   3771            let new_layer_kind = match input_layers.last() {
   3772                Some(curr_layer) => {
   3773                    match (curr_layer.usage, usage) {
   3774                        // Content -> content, composite in to same layer
   3775                        (CompositorSurfaceUsage::Content, CompositorSurfaceUsage::Content) => None,
   3776                        (CompositorSurfaceUsage::External { .. }, CompositorSurfaceUsage::Content) => Some(usage),
   3777 
   3778                        // Switch of layer type, or video -> video, need new swapchain
   3779                        (CompositorSurfaceUsage::Content, CompositorSurfaceUsage::External { .. }) |
   3780                        (CompositorSurfaceUsage::External { .. }, CompositorSurfaceUsage::External { .. }) => {
   3781                            // Only create a new layer if we're using LayerCompositor
   3782                            match self.compositor_config {
   3783                                CompositorConfig::Draw { .. } | CompositorConfig::Native { .. } => None,
   3784                                CompositorConfig::Layer { .. } => {
   3785                                    Some(usage)
   3786                                }
   3787                            }
   3788                        }
   3789                        (CompositorSurfaceUsage::DebugOverlay, _) => {
   3790                            Some(usage)
   3791                        }
   3792                        // Should not encounter debug layers as new layer
   3793                        (_, CompositorSurfaceUsage::DebugOverlay) => {
   3794                            unreachable!();
   3795                        }
   3796                    }
   3797                }
   3798                None => {
   3799                    // No layers yet, so we need a new one
   3800                    Some(usage)
   3801                }
   3802            };
   3803 
   3804            if let Some(new_layer_kind) = new_layer_kind {
   3805                let (offset, clip_rect, is_opaque, rounded_clip_rect, rounded_clip_radii) = match usage {
   3806                    CompositorSurfaceUsage::Content => {
   3807                        (
   3808                            DeviceIntPoint::zero(),
   3809                            device_size.into(),
   3810                            false,      // Assume not opaque, we'll calculate this later
   3811                            device_size.into(),
   3812                            ClipRadius::EMPTY,
   3813                        )
   3814                    }
   3815                    CompositorSurfaceUsage::External { .. } => {
   3816                        let rect = composite_state.get_device_rect(
   3817                            &tile.local_rect,
   3818                            tile.transform_index
   3819                        );
   3820 
   3821                        let clip_rect = tile.device_clip_rect.to_i32();
   3822                        let is_opaque = tile.kind != TileKind::Alpha;
   3823 
   3824                        if self.debug_flags.contains(DebugFlags::EXTERNAL_COMPOSITE_BORDERS) {
   3825                            self.external_composite_debug_items.push(DebugItem::Rect {
   3826                                outer_color: debug_colors::ORANGERED,
   3827                                inner_color: ColorF { r: 0.0, g: 0.0, b: 0.0, a: 0.0 },
   3828                                rect: tile.device_clip_rect,
   3829                                thickness: 10,
   3830                            });
   3831                        }
   3832 
   3833                        let (rounded_clip_rect, rounded_clip_radii) = match tile.clip_index {
   3834                            Some(clip_index) => {
   3835                                let clip = composite_state.get_compositor_clip(clip_index);
   3836                                let radius = ClipRadius {
   3837                                    top_left: clip.radius.top_left.width.round() as i32,
   3838                                    top_right: clip.radius.top_right.width.round() as i32,
   3839                                    bottom_left: clip.radius.bottom_left.width.round() as i32,
   3840                                    bottom_right: clip.radius.bottom_right.width.round() as i32,
   3841                                };
   3842                                (clip.rect.to_i32(), radius)
   3843                            }
   3844                            None => {
   3845                                (clip_rect, ClipRadius::EMPTY)
   3846                            }
   3847                        };
   3848 
   3849                        (
   3850                            rect.min.to_i32(),
   3851                            clip_rect,
   3852                            is_opaque,
   3853                            rounded_clip_rect,
   3854                            rounded_clip_radii,
   3855                        )
   3856                    }
   3857                    CompositorSurfaceUsage::DebugOverlay => unreachable!(),
   3858                };
   3859 
   3860                input_layers.push(CompositorInputLayer {
   3861                    usage: new_layer_kind,
   3862                    is_opaque,
   3863                    offset,
   3864                    clip_rect,
   3865                    rounded_clip_rect,
   3866                    rounded_clip_radii,
   3867                });
   3868 
   3869                swapchain_layers.push(SwapChainLayer {
   3870                    occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
   3871                })
   3872            }
   3873            tile_index_to_layer_index[idx] = Some(input_layers.len() - 1);
   3874 
   3875            // Caluclate actual visible tile's rects
   3876 
   3877            let is_opaque = tile.kind == TileKind::Opaque;
   3878 
   3879            match tile.clip_index {
   3880                Some(clip_index) => {
   3881                    let clip = composite_state.get_compositor_clip(clip_index);
   3882 
   3883                    // TODO(gw): Make segment builder generic on unit to avoid casts below.
   3884                    segment_builder.initialize(
   3885                        rect.cast_unit(),
   3886                        None,
   3887                        rect.cast_unit(),
   3888                    );
   3889                    segment_builder.push_clip_rect(
   3890                        clip.rect.cast_unit(),
   3891                        Some(clip.radius),
   3892                        ClipMode::Clip,
   3893                    );
   3894                    segment_builder.build(|segment| {
   3895                        let key = OcclusionItemKey { tile_index: idx, needs_mask: segment.has_mask };
   3896 
   3897                        full_render_occlusion.add(
   3898                            &segment.rect.cast_unit(),
   3899                            is_opaque && !segment.has_mask,
   3900                            key,
   3901                        );
   3902                    });
   3903                }
   3904                None => {
   3905                    full_render_occlusion.add(&rect, is_opaque, OcclusionItemKey {
   3906                        tile_index: idx,
   3907                        needs_mask: false,
   3908                    });
   3909                }
   3910            }
   3911        }
   3912 
   3913        assert_eq!(swapchain_layers.len(), input_layers.len());
   3914 
   3915        if window_is_opaque {
   3916            match input_layers.last_mut() {
   3917                Some(_layer) => {
   3918                    // If the window is opaque, and the last(back) layer is
   3919                    //  a content layer then mark that as opaque.
   3920                    // TODO: This causes talos performance regressions.
   3921                    // if let CompositorSurfaceUsage::Content = layer.usage {
   3922                    //     layer.is_opaque = true;
   3923                    // }
   3924                }
   3925                None => {
   3926                    // If no tiles were present, and we expect an opaque window,
   3927                    // add an empty layer to force a composite that clears the screen,
   3928                    // to match existing semantics.
   3929                    input_layers.push(CompositorInputLayer {
   3930                        usage: CompositorSurfaceUsage::Content,
   3931                        is_opaque: true,
   3932                        offset: DeviceIntPoint::zero(),
   3933                        clip_rect: device_size.into(),
   3934                        rounded_clip_rect: device_size.into(),
   3935                        rounded_clip_radii: ClipRadius::EMPTY,
   3936                    });
   3937 
   3938                    swapchain_layers.push(SwapChainLayer {
   3939                        occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
   3940                    });
   3941                }
   3942            }
   3943        }
   3944 
   3945        let mut full_render = self.debug_overlay_state.is_enabled;
   3946 
   3947        // Start compositing if using OS compositor
   3948        if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
   3949            let input = CompositorInputConfig {
   3950                enable_screenshot,
   3951                layers: &input_layers,
   3952            };
   3953            full_render |= compositor.begin_frame(&input);
   3954        }
   3955 
   3956        // Full render is requested when layer tree is updated.
   3957        let mut partial_present_mode = if full_render {
   3958            None
   3959        } else {
   3960            partial_present_mode
   3961        };
   3962 
   3963        assert_eq!(swapchain_layers.len(), input_layers.len());
   3964 
   3965        // Recalculate dirty rect for layer compositor
   3966        if let Some(ref _compositor) = self.compositor_config.layer_compositor() {
   3967            // Set visible rests of current frame to each tile's CompositeTileState.
   3968            for item in full_render_occlusion
   3969            .opaque_items()
   3970            .iter()
   3971            .chain(full_render_occlusion.alpha_items().iter()) {
   3972                let tile = &composite_state.tiles[item.key.tile_index];
   3973                match tile.tile_id {
   3974                    Some(tile_id) => {
   3975                        if let Some(tile_state) = layer_compositor_frame_state.tile_states.get_mut(&tile_id) {
   3976                            tile_state.visible_rects.push(item.rectangle);
   3977                        } else {
   3978                            unreachable!();
   3979                        }
   3980                    }
   3981                    None => {}
   3982                }
   3983            }
   3984 
   3985            let can_use_partial_present =
   3986                !self.force_redraw && !full_render &&
   3987                self.layer_compositor_frame_state_in_prev_frame.is_some();
   3988 
   3989            if can_use_partial_present {
   3990                let mut combined_dirty_rect = DeviceRect::zero();
   3991 
   3992                for tile in composite_state.tiles.iter() {
   3993                    if tile.tile_id.is_none() {
   3994                        match tile.surface {
   3995                            CompositeTileSurface::ExternalSurface { .. } => {}
   3996                            CompositeTileSurface::Texture { .. }  |
   3997                            CompositeTileSurface::Color { .. } => {
   3998                                unreachable!();
   3999                            },
   4000                        }
   4001                        continue;
   4002                    }
   4003 
   4004                    assert!(tile.tile_id.is_some());
   4005 
   4006                    let tiles_exists_in_prev_frame =
   4007                        self.layer_compositor_frame_state_in_prev_frame
   4008                        .as_ref()
   4009                        .unwrap()
   4010                        .tile_states
   4011                        .contains_key(&tile.tile_id.unwrap());
   4012                    let tile_id = tile.tile_id.unwrap();
   4013                    let tile_state = layer_compositor_frame_state.tile_states.get(&tile_id).unwrap();
   4014 
   4015                    if tiles_exists_in_prev_frame {
   4016                        let prev_tile_state = self.layer_compositor_frame_state_in_prev_frame
   4017                            .as_ref()
   4018                            .unwrap()
   4019                            .tile_states
   4020                            .get(&tile_id)
   4021                            .unwrap();
   4022 
   4023                        if tile_state.same_state(prev_tile_state) {
   4024                            // Case that tile is same state in previous frame and current frame.
   4025                            // Intersection of tile's dirty rect and tile's visible rects are actual dirty rects.
   4026                            let dirty_rect = composite_state.get_device_rect(
   4027                                &tile.local_dirty_rect,
   4028                                tile.transform_index,
   4029                            );
   4030                            for rect in tile_state.visible_rects.iter()  {
   4031                                let visible_dirty_rect = rect.intersection(&dirty_rect);
   4032                                if visible_dirty_rect.is_some() {
   4033                                    combined_dirty_rect = combined_dirty_rect.union(&visible_dirty_rect.unwrap());
   4034                                }
   4035                            }
   4036                        } else {
   4037                            // If tile is rendered in previous frame, but its state is different,
   4038                            // both visible rects in previous frame and current frame are dirty rects.
   4039                            for rect in tile_state.visible_rects
   4040                                .iter()
   4041                                .chain(prev_tile_state.visible_rects.iter())  {
   4042                                combined_dirty_rect = combined_dirty_rect.union(&rect);
   4043                            }
   4044                        }
   4045                    } else {
   4046                        // If tile is not rendered in previous frame, its all visible rects are dirty rects.
   4047                        for rect in &tile_state.visible_rects {
   4048                            combined_dirty_rect = combined_dirty_rect.union(&rect);
   4049                        }
   4050                    }
   4051                }
   4052 
   4053                // Case that tile is rendered in pervious frame, but not in current frame.
   4054                for (tile_id, tile_state) in self.layer_compositor_frame_state_in_prev_frame
   4055                    .as_ref()
   4056                    .unwrap()
   4057                    .tile_states
   4058                    .iter() {
   4059                    if !layer_compositor_frame_state.tile_states.contains_key(&tile_id) {
   4060                        for rect in tile_state.visible_rects.iter()  {
   4061                            combined_dirty_rect = combined_dirty_rect.union(&rect);
   4062                        }
   4063                    }
   4064                }
   4065 
   4066                // Case that ExternalSurface is not promoted to external composite.
   4067                for rect in layer_compositor_frame_state
   4068                    .rects_without_id
   4069                    .iter()
   4070                    .chain(self.layer_compositor_frame_state_in_prev_frame.as_ref().unwrap().rects_without_id.iter())  {
   4071                    combined_dirty_rect = combined_dirty_rect.union(&rect);
   4072                }
   4073 
   4074                partial_present_mode = Some(PartialPresentMode::Single {
   4075                    dirty_rect: combined_dirty_rect,
   4076                });
   4077            } else {
   4078                partial_present_mode = None;
   4079            }
   4080 
   4081            self.layer_compositor_frame_state_in_prev_frame = Some(layer_compositor_frame_state);
   4082        }
   4083 
   4084        // Check tiles handling with partial_present_mode
   4085 
   4086        let mut opaque_rounded_corners: HashSet<CompositeRoundedCorner> = HashSet::new();
   4087 
   4088        // NOTE: Tiles here are being iterated in front-to-back order by
   4089        //       z-id, due to the sort in composite_state.end_frame()
   4090        for (idx, tile) in composite_state.tiles.iter().enumerate() {
   4091            let device_tile_box = composite_state.get_device_rect(
   4092                &tile.local_rect,
   4093                tile.transform_index
   4094            );
   4095 
   4096            // Determine a clip rect to apply to this tile, depending on what
   4097            // the partial present mode is.
   4098            let partial_clip_rect = match partial_present_mode {
   4099                Some(PartialPresentMode::Single { dirty_rect }) => dirty_rect,
   4100                None => device_tile_box,
   4101            };
   4102 
   4103            // Simple compositor needs the valid rect in device space to match clip rect
   4104            let device_valid_rect = composite_state
   4105                .get_device_rect(&tile.local_valid_rect, tile.transform_index);
   4106 
   4107            let rect = device_tile_box
   4108                .intersection_unchecked(&tile.device_clip_rect)
   4109                .intersection_unchecked(&partial_clip_rect)
   4110                .intersection_unchecked(&device_valid_rect);
   4111 
   4112            if rect.is_empty() {
   4113                continue;
   4114            }
   4115 
   4116            let layer_index = match tile_index_to_layer_index[idx] {
   4117                None => {
   4118                    // The rect of partial present should be subset of the rect of full render.
   4119                    error!("rect {:?} should have valid layer index", rect);
   4120                    continue;
   4121                }
   4122                Some(layer_index) => layer_index,
   4123            };
   4124 
   4125            // For normal tiles, add to occlusion tracker
   4126            let layer = &mut swapchain_layers[layer_index];
   4127 
   4128            let is_opaque = tile.kind == TileKind::Opaque;
   4129 
   4130            match tile.clip_index {
   4131                Some(clip_index) => {
   4132                    let clip = composite_state.get_compositor_clip(clip_index);
   4133 
   4134                        // TODO(gw): Make segment builder generic on unit to avoid casts below.
   4135                    segment_builder.initialize(
   4136                        rect.cast_unit(),
   4137                        None,
   4138                        rect.cast_unit(),
   4139                    );
   4140                    segment_builder.push_clip_rect(
   4141                        clip.rect.cast_unit(),
   4142                        Some(clip.radius),
   4143                        ClipMode::Clip,
   4144                    );
   4145                    segment_builder.build(|segment| {
   4146                        let key = OcclusionItemKey { tile_index: idx, needs_mask: segment.has_mask };
   4147 
   4148                        let radius = if segment.edge_flags ==
   4149                            EdgeAaSegmentMask::TOP | EdgeAaSegmentMask::LEFT &&
   4150                            !clip.radius.top_left.is_empty() {
   4151                            Some(clip.radius.top_left)
   4152                        } else if segment.edge_flags ==
   4153                            EdgeAaSegmentMask::TOP | EdgeAaSegmentMask::RIGHT &&
   4154                            !clip.radius.top_right.is_empty() {
   4155                            Some(clip.radius.top_right)
   4156                        } else if segment.edge_flags ==
   4157                            EdgeAaSegmentMask::BOTTOM | EdgeAaSegmentMask::LEFT &&
   4158                            !clip.radius.bottom_left.is_empty() {
   4159                            Some(clip.radius.bottom_left)
   4160                        } else if segment.edge_flags ==
   4161                            EdgeAaSegmentMask::BOTTOM | EdgeAaSegmentMask::RIGHT &&
   4162                            !clip.radius.bottom_right.is_empty() {
   4163                            Some(clip.radius.bottom_right)
   4164                        } else {
   4165                            None
   4166                        };
   4167 
   4168                        if let Some(radius) = radius {
   4169                            let rounded_corner = CompositeRoundedCorner {
   4170                                    rect: segment.rect.cast_unit(),
   4171                                    radius: radius,
   4172                                    edge_flags: segment.edge_flags,
   4173                            };
   4174 
   4175                            // Drop overdraw rounded rect
   4176                            if opaque_rounded_corners.contains(&rounded_corner) {
   4177                                return;
   4178                            }
   4179                            
   4180                            if is_opaque {
   4181                                opaque_rounded_corners.insert(rounded_corner);
   4182                            }
   4183                        }
   4184 
   4185                        layer.occlusion.add(
   4186                            &segment.rect.cast_unit(),
   4187                            is_opaque && !segment.has_mask,
   4188                            key,
   4189                        );
   4190                    });
   4191                }
   4192                None => {
   4193                    layer.occlusion.add(&rect, is_opaque, OcclusionItemKey {
   4194                        tile_index: idx,
   4195                        needs_mask: false,
   4196                    });
   4197                }
   4198            }
   4199        }
   4200 
   4201        assert_eq!(swapchain_layers.len(), input_layers.len());
   4202        let mut content_clear_color = Some(self.clear_color);
   4203 
   4204        for (layer_index, (layer, swapchain_layer)) in input_layers.iter().zip(swapchain_layers.iter()).enumerate() {
   4205            self.device.reset_state();
   4206 
   4207            // Skip compositing external images or debug layers here
   4208            match layer.usage {
   4209                CompositorSurfaceUsage::Content => {}
   4210                CompositorSurfaceUsage::External { .. } | CompositorSurfaceUsage::DebugOverlay => {
   4211                    continue;
   4212                }
   4213            }
   4214 
   4215            // Only use supplied clear color for first content layer we encounter
   4216            let clear_color = content_clear_color.take().unwrap_or(ColorF::TRANSPARENT);
   4217 
   4218            if let Some(ref mut _compositor) = self.compositor_config.layer_compositor() {
   4219                if let Some(PartialPresentMode::Single { dirty_rect }) = partial_present_mode {
   4220                    if dirty_rect.is_empty() {
   4221                        continue;
   4222                    }
   4223                }
   4224            }
   4225 
   4226            let draw_target = match self.compositor_config {
   4227                CompositorConfig::Layer { ref mut compositor } => {
   4228                    match partial_present_mode {
   4229                        Some(PartialPresentMode::Single { dirty_rect }) => {
   4230                            compositor.bind_layer(layer_index, &[dirty_rect.to_i32()]);
   4231                        }
   4232                        None => {
   4233                            compositor.bind_layer(layer_index, &[]);
   4234                        }
   4235                    };
   4236 
   4237                    DrawTarget::NativeSurface {
   4238                        offset: -layer.offset,
   4239                        external_fbo_id: 0,
   4240                        dimensions: frame_device_size,
   4241                    }
   4242                }
   4243                // Native can be hit when switching compositors (disable when using Layer)
   4244                CompositorConfig::Draw { .. } | CompositorConfig::Native { .. } => {
   4245                    fb_draw_target
   4246                }
   4247            };
   4248 
   4249            // TODO(gwc): When supporting external attached swapchains, need to skip the composite pass here
   4250 
   4251            // Draw each compositing pass in to a swap chain
   4252            self.composite_pass(
   4253                composite_state,
   4254                draw_target,
   4255                clear_color,
   4256                projection,
   4257                results,
   4258                partial_present_mode,
   4259                swapchain_layer,
   4260            );
   4261 
   4262            if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
   4263                match partial_present_mode {
   4264                    Some(PartialPresentMode::Single { dirty_rect }) => {
   4265                        compositor.present_layer(layer_index, &[dirty_rect.to_i32()]);
   4266                    }
   4267                    None => {
   4268                        compositor.present_layer(layer_index, &[]);
   4269                    }
   4270                };
   4271            }
   4272        }
   4273 
   4274        // End frame notify for experimental compositor
   4275        if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
   4276            for (layer_index, layer) in input_layers.iter().enumerate() {
   4277                // External surfaces need transform applied, but content
   4278                // surfaces are always at identity
   4279                let transform = match layer.usage {
   4280                    CompositorSurfaceUsage::Content => CompositorSurfaceTransform::identity(),
   4281                    CompositorSurfaceUsage::External { transform_index, .. } => composite_state.get_compositor_transform(transform_index),
   4282                    CompositorSurfaceUsage::DebugOverlay => CompositorSurfaceTransform::identity(),
   4283                };
   4284 
   4285                compositor.add_surface(
   4286                    layer_index,
   4287                    transform,
   4288                    layer.clip_rect,
   4289                    ImageRendering::Auto,
   4290                    layer.rounded_clip_rect,
   4291                    layer.rounded_clip_radii,
   4292                );
   4293            }
   4294        }
   4295    }
   4296 
    /// Clears the color (and, if needed, depth) buffer of a render target
    /// before its contents are drawn.
    ///
    /// Depending on the target's flags and device quirks, the clear is done in
    /// one of three ways:
    ///   * precise per-rect `clear_target` calls (cached targets, where only
    ///     the listed rects may be touched),
    ///   * a single `clear_target`, optionally restricted to a scissor rect,
    ///   * drawing colored quads with the `ps_clear` shader (workaround path
    ///     for devices where region clears via glClear are broken).
    ///
    /// `framebuffer_kind` selects which blend-state cache to update, and
    /// `projection` is only used when falling back to the quad-clear shader.
    /// GPU time spent here is attributed to `GPU_TAG_SETUP_TARGET`.
    fn clear_render_target(
        &mut self,
        target: &RenderTarget,
        draw_target: DrawTarget,
        framebuffer_kind: FramebufferKind,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        let needs_depth = target.needs_depth();

        // Depth clears to the far plane (1.0) when the target uses depth.
        let clear_depth = if needs_depth {
            Some(1.0)
        } else {
            None
        };

        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);

        // Clearing must not be affected by leftover depth/blend state.
        self.device.disable_depth();
        self.set_blend(false, framebuffer_kind);

        let is_alpha = target.target_kind == RenderTargetKind::Alpha;
        // Cached targets retain content across frames, so only the explicitly
        // requested rects may be cleared.
        let require_precise_clear = target.cached;

        // On some Mali-T devices we have observed crashes in subsequent draw calls
        // immediately after clearing the alpha render target regions with glClear().
        // Using the shader to clear the regions avoids the crash. See bug 1638593.
        let clear_with_quads = (target.cached && self.clear_caches_with_quads)
            || (is_alpha && self.clear_alpha_targets_with_quads);

        // Whether restricting the clear via scissor is likely to be a win on
        // this device and is enabled by configuration.
        let favor_partial_updates = self.device.get_capabilities().supports_render_target_partial_update
            && self.enable_clear_scissor;

        // On some Adreno 4xx devices we have seen render tasks to alpha targets have no
        // effect unless the target is fully cleared prior to rendering. See bug 1714227.
        let full_clears_on_adreno = is_alpha && self.device.get_capabilities().requires_alpha_target_full_clear;
        let require_full_clear = !require_precise_clear
            && (full_clears_on_adreno || !favor_partial_updates);

        let clear_color = target
            .clear_color
            .map(|color| color.to_array());

        // Tracks whether the depth buffer was cleared as part of the color
        // clear below, so we don't clear it twice.
        let mut cleared_depth = false;
        if clear_with_quads {
            // Will be handled last. Only specific rects will be cleared.
        } else if require_precise_clear {
            // Only clear specific rects. Note: depth (if any) is handled by
            // the `needs_depth && !cleared_depth` clear further down.
            for (rect, color) in &target.clears {
                self.device.clear_target(
                    Some(color.to_array()),
                    None,
                    Some(draw_target.to_framebuffer_rect(*rect)),
                );
            }
        } else {
            // At this point we know we don't require precise clears for correctness.
            // We may still attempt to restrict the clear rect as an optimization on
            // some configurations.
            let clear_rect = if require_full_clear {
                None
            } else {
                match draw_target {
                    DrawTarget::Default { rect, total_size, .. } => {
                        if rect.min == FramebufferIntPoint::zero() && rect.size() == total_size {
                            // Whole screen is covered, no need for scissor
                            None
                        } else {
                            Some(rect)
                        }
                    }
                    DrawTarget::Texture { .. } => {
                        // TODO(gw): Applying a scissor rect and minimal clear here
                        // is a very large performance win on the Intel and nVidia
                        // GPUs that I have tested with. It's possible it may be a
                        // performance penalty on other GPU types - we should test this
                        // and consider different code paths.
                        //
                        // Note: The above measurements were taken when render
                        // target slices were minimum 2048x2048. Now that we size
                        // them adaptively, this may be less of a win (except perhaps
                        // on a mostly-unused last slice of a large texture array).
                        target.used_rect.map(|rect| draw_target.to_framebuffer_rect(rect))
                    }
                    // Full clear.
                    _ => None,
                }
            };

            // Single combined color + depth clear, possibly scissored.
            self.device.clear_target(
                clear_color,
                clear_depth,
                clear_rect,
            );
            cleared_depth = true;
        }

        // Make sure to clear the depth buffer if it is used.
        if needs_depth && !cleared_depth {
            // TODO: We could also clear the depth buffer via ps_clear. This
            // is done by picture cache targets in some cases.
            self.device.clear_target(None, clear_depth, None);
        }

        // Finally, if we decided to clear with quads or if we need to clear
        // some areas with specific colors that don't match the global clear
        // color, clear more areas using a draw call.

        let mut clear_instances = Vec::with_capacity(target.clears.len());
        for (rect, color) in &target.clears {
            // Two reasons to include a rect here: the quad-clear workaround is
            // active, or the rect's color differs from the color already laid
            // down by the non-precise glClear above.
            if clear_with_quads || (!require_precise_clear && target.clear_color != Some(*color)) {
                let rect = rect.to_f32();
                clear_instances.push(ClearInstance {
                    rect: [
                        rect.min.x, rect.min.y,
                        rect.max.x, rect.max.y,
                    ],
                    color: color.to_array(),
                })
            }
        }

        if !clear_instances.is_empty() {
            self.shaders.borrow_mut().ps_clear().bind(
                &mut self.device,
                &projection,
                None,
                &mut self.renderer_errors,
                &mut self.profile,
                &mut self.command_log,
            );
            self.draw_instanced_batch(
                &clear_instances,
                VertexArrayKind::Clear,
                &BatchTextures::empty(),
                stats,
            );
        }
    }
   4436 
   4437    fn draw_render_target(
   4438        &mut self,
   4439        texture_id: CacheTextureId,
   4440        target: &RenderTarget,
   4441        render_tasks: &RenderTaskGraph,
   4442        stats: &mut RendererStats,
   4443    ) {
   4444        let needs_depth = target.needs_depth();
   4445 
   4446        let texture = self.texture_resolver.get_cache_texture_mut(&texture_id);
   4447 
   4448        if let Some(history) = &mut self.command_log {
   4449            let label = match target.target_kind {
   4450                RenderTargetKind::Color => "color",
   4451                RenderTargetKind::Alpha => "alpha",
   4452            };
   4453            history.begin_render_target(label, texture.get_dimensions());
   4454        }
   4455 
   4456        if needs_depth {
   4457            self.device.reuse_render_target::<u8>(
   4458                texture,
   4459                RenderTargetInfo { has_depth: needs_depth },
   4460            );
   4461        }
   4462 
   4463        let draw_target = DrawTarget::from_texture(
   4464            texture,
   4465            needs_depth,
   4466        );
   4467 
   4468        let projection = Transform3D::ortho(
   4469            0.0,
   4470            draw_target.dimensions().width as f32,
   4471            0.0,
   4472            draw_target.dimensions().height as f32,
   4473            self.device.ortho_near_plane(),
   4474            self.device.ortho_far_plane(),
   4475        );
   4476 
   4477        profile_scope!("draw_render_target");
   4478        let _gm = self.gpu_profiler.start_marker("render target");
   4479 
   4480        let counter = match target.target_kind {
   4481            RenderTargetKind::Color => profiler::COLOR_PASSES,
   4482            RenderTargetKind::Alpha => profiler::ALPHA_PASSES,
   4483        };
   4484        self.profile.inc(counter);
   4485 
   4486        let sampler_query = match target.target_kind {
   4487            RenderTargetKind::Color => None,
   4488            RenderTargetKind::Alpha => Some(self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_ALPHA)),
   4489        };
   4490 
   4491        // sanity check for the depth buffer
   4492        if let DrawTarget::Texture { with_depth, .. } = draw_target {
   4493            assert!(with_depth >= target.needs_depth());
   4494        }
   4495 
   4496        let framebuffer_kind = if draw_target.is_default() {
   4497            FramebufferKind::Main
   4498        } else {
   4499            FramebufferKind::Other
   4500        };
   4501 
   4502        self.device.bind_draw_target(draw_target);
   4503 
   4504        if self.device.get_capabilities().supports_qcom_tiled_rendering {
   4505            let preserve_mask = match target.clear_color {
   4506                Some(_) => 0,
   4507                None => gl::COLOR_BUFFER_BIT0_QCOM,
   4508            };
   4509            if let Some(used_rect) = target.used_rect {
   4510                self.device.gl().start_tiling_qcom(
   4511                    used_rect.min.x.max(0) as _,
   4512                    used_rect.min.y.max(0) as _,
   4513                    used_rect.width() as _,
   4514                    used_rect.height() as _,
   4515                    preserve_mask,
   4516                );
   4517            }
   4518        }
   4519 
   4520        if needs_depth {
   4521            self.device.enable_depth_write();
   4522        } else {
   4523            self.device.disable_depth_write();
   4524        }
   4525 
   4526        self.clear_render_target(
   4527            target,
   4528            draw_target,
   4529            framebuffer_kind,
   4530            &projection,
   4531            stats,
   4532        );
   4533 
   4534        if needs_depth {
   4535            self.device.disable_depth_write();
   4536        }
   4537 
   4538        // Handle any resolves from parent pictures to this target
   4539        self.handle_resolves(
   4540            &target.resolve_ops,
   4541            render_tasks,
   4542            draw_target,
   4543        );
   4544 
   4545        // Handle any blits from the texture cache to this target.
   4546        self.handle_blits(
   4547            &target.blits,
   4548            render_tasks,
   4549            draw_target,
   4550        );
   4551 
   4552        // Draw any borders for this target.
   4553        if !target.border_segments_solid.is_empty() ||
   4554           !target.border_segments_complex.is_empty()
   4555        {
   4556            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_BORDER);
   4557 
   4558            self.set_blend(true, FramebufferKind::Other);
   4559            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Other);
   4560 
   4561            if !target.border_segments_solid.is_empty() {
   4562                self.shaders.borrow_mut().cs_border_solid().bind(
   4563                    &mut self.device,
   4564                    &projection,
   4565                    None,
   4566                    &mut self.renderer_errors,
   4567                    &mut self.profile,
   4568                    &mut self.command_log,
   4569                );
   4570 
   4571                self.draw_instanced_batch(
   4572                    &target.border_segments_solid,
   4573                    VertexArrayKind::Border,
   4574                    &BatchTextures::empty(),
   4575                    stats,
   4576                );
   4577            }
   4578 
   4579            if !target.border_segments_complex.is_empty() {
   4580                self.shaders.borrow_mut().cs_border_segment().bind(
   4581                    &mut self.device,
   4582                    &projection,
   4583                    None,
   4584                    &mut self.renderer_errors,
   4585                    &mut self.profile,
   4586                    &mut self.command_log,
   4587                );
   4588 
   4589                self.draw_instanced_batch(
   4590                    &target.border_segments_complex,
   4591                    VertexArrayKind::Border,
   4592                    &BatchTextures::empty(),
   4593                    stats,
   4594                );
   4595            }
   4596 
   4597            self.set_blend(false, FramebufferKind::Other);
   4598        }
   4599 
   4600        // Draw any line decorations for this target.
   4601        if !target.line_decorations.is_empty() {
   4602            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_LINE_DECORATION);
   4603 
   4604            self.set_blend(true, FramebufferKind::Other);
   4605            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Other);
   4606 
   4607            self.shaders.borrow_mut().cs_line_decoration().bind(
   4608                &mut self.device,
   4609                &projection,
   4610                None,
   4611                &mut self.renderer_errors,
   4612                &mut self.profile,
   4613                &mut self.command_log,
   4614            );
   4615 
   4616            self.draw_instanced_batch(
   4617                &target.line_decorations,
   4618                VertexArrayKind::LineDecoration,
   4619                &BatchTextures::empty(),
   4620                stats,
   4621            );
   4622 
   4623            self.set_blend(false, FramebufferKind::Other);
   4624        }
   4625 
   4626        // Draw any fast path linear gradients for this target.
   4627        if !target.fast_linear_gradients.is_empty() {
   4628            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_FAST_LINEAR_GRADIENT);
   4629 
   4630            self.set_blend(false, FramebufferKind::Other);
   4631 
   4632            self.shaders.borrow_mut().cs_fast_linear_gradient().bind(
   4633                &mut self.device,
   4634                &projection,
   4635                None,
   4636                &mut self.renderer_errors,
   4637                &mut self.profile,
   4638                &mut self.command_log,
   4639            );
   4640 
   4641            self.draw_instanced_batch(
   4642                &target.fast_linear_gradients,
   4643                VertexArrayKind::FastLinearGradient,
   4644                &BatchTextures::empty(),
   4645                stats,
   4646            );
   4647        }
   4648 
   4649        // Draw any linear gradients for this target.
   4650        if !target.linear_gradients.is_empty() {
   4651            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_LINEAR_GRADIENT);
   4652 
   4653            self.set_blend(false, FramebufferKind::Other);
   4654 
   4655            self.shaders.borrow_mut().cs_linear_gradient().bind(
   4656                &mut self.device,
   4657                &projection,
   4658                None,
   4659                &mut self.renderer_errors,
   4660                &mut self.profile,
   4661                &mut self.command_log,
   4662            );
   4663 
   4664            if let Some(ref texture) = self.dither_matrix_texture {
   4665                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
   4666            }
   4667 
   4668            self.draw_instanced_batch(
   4669                &target.linear_gradients,
   4670                VertexArrayKind::LinearGradient,
   4671                &BatchTextures::empty(),
   4672                stats,
   4673            );
   4674        }
   4675 
   4676        // Draw any radial gradients for this target.
   4677        if !target.radial_gradients.is_empty() {
   4678            let _timer = self.gpu_profiler.start_timer(GPU_TAG_RADIAL_GRADIENT);
   4679 
   4680            self.set_blend(false, FramebufferKind::Other);
   4681 
   4682            self.shaders.borrow_mut().cs_radial_gradient().bind(
   4683                &mut self.device,
   4684                &projection,
   4685                None,
   4686                &mut self.renderer_errors,
   4687                &mut self.profile,
   4688                &mut self.command_log,
   4689            );
   4690 
   4691            if let Some(ref texture) = self.dither_matrix_texture {
   4692                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
   4693            }
   4694 
   4695            self.draw_instanced_batch(
   4696                &target.radial_gradients,
   4697                VertexArrayKind::RadialGradient,
   4698                &BatchTextures::empty(),
   4699                stats,
   4700            );
   4701        }
   4702 
   4703        // Draw any conic gradients for this target.
   4704        if !target.conic_gradients.is_empty() {
   4705            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CONIC_GRADIENT);
   4706 
   4707            self.set_blend(false, FramebufferKind::Other);
   4708 
   4709            self.shaders.borrow_mut().cs_conic_gradient().bind(
   4710                &mut self.device,
   4711                &projection,
   4712                None,
   4713                &mut self.renderer_errors,
   4714                &mut self.profile,
   4715                &mut self.command_log,
   4716            );
   4717 
   4718            if let Some(ref texture) = self.dither_matrix_texture {
   4719                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
   4720            }
   4721 
   4722            self.draw_instanced_batch(
   4723                &target.conic_gradients,
   4724                VertexArrayKind::ConicGradient,
   4725                &BatchTextures::empty(),
   4726                stats,
   4727            );
   4728        }
   4729 
   4730        // Draw any blurs for this target.
   4731        // Blurs are rendered as a standard 2-pass
   4732        // separable implementation.
   4733        // TODO(gw): In the future, consider having
   4734        //           fast path blur shaders for common
   4735        //           blur radii with fixed weights.
   4736        if !target.vertical_blurs.is_empty() || !target.horizontal_blurs.is_empty() {
   4737            let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLUR);
   4738 
   4739            self.set_blend(false, framebuffer_kind);
   4740            self.shaders.borrow_mut().cs_blur_rgba8().bind(
   4741                &mut self.device,
   4742                &projection,
   4743                None,
   4744                &mut self.renderer_errors,
   4745                &mut self.profile,
   4746                &mut self.command_log,
   4747            );
   4748 
   4749            if !target.vertical_blurs.is_empty() {
   4750                self.draw_blurs(
   4751                    &target.vertical_blurs,
   4752                    stats,
   4753                );
   4754            }
   4755 
   4756            if !target.horizontal_blurs.is_empty() {
   4757                self.draw_blurs(
   4758                    &target.horizontal_blurs,
   4759                    stats,
   4760                );
   4761            }
   4762        }
   4763 
   4764        self.handle_scaling(
   4765            &target.scalings,
   4766            &projection,
   4767            stats,
   4768        );
   4769 
   4770        for (ref textures, ref filters) in &target.svg_nodes {
   4771            self.handle_svg_nodes(textures, filters, &projection, stats);
   4772        }
   4773 
   4774        for alpha_batch_container in &target.alpha_batch_containers {
   4775            self.draw_alpha_batch_container(
   4776                alpha_batch_container,
   4777                draw_target,
   4778                framebuffer_kind,
   4779                &projection,
   4780                render_tasks,
   4781                stats,
   4782            );
   4783        }
   4784 
   4785        self.handle_prims(
   4786            &draw_target,
   4787            &target.prim_instances,
   4788            &target.prim_instances_with_scissor,
   4789            &projection,
   4790            stats,
   4791        );
   4792 
   4793        // Draw the clip items into the tiled alpha mask.
   4794        let has_primary_clips = !target.clip_batcher.primary_clips.is_empty();
   4795        let has_secondary_clips = !target.clip_batcher.secondary_clips.is_empty();
   4796        let has_clip_masks = !target.clip_masks.is_empty();
   4797        if has_primary_clips | has_secondary_clips | has_clip_masks {
   4798            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_CLIP);
   4799 
   4800            // TODO(gw): Consider grouping multiple clip masks per shader
   4801            //           invocation here to reduce memory bandwith further?
   4802 
   4803            if has_primary_clips {
   4804                // Draw the primary clip mask - since this is the first mask
   4805                // for the task, we can disable blending, knowing that it will
   4806                // overwrite every pixel in the mask area.
   4807                self.set_blend(false, FramebufferKind::Other);
   4808                self.draw_clip_batch_list(
   4809                    &target.clip_batcher.primary_clips,
   4810                    &projection,
   4811                    stats,
   4812                );
   4813            }
   4814 
   4815            if has_secondary_clips {
   4816                // switch to multiplicative blending for secondary masks, using
   4817                // multiplicative blending to accumulate clips into the mask.
   4818                self.set_blend(true, FramebufferKind::Other);
   4819                self.set_blend_mode_multiply(FramebufferKind::Other);
   4820                self.draw_clip_batch_list(
   4821                    &target.clip_batcher.secondary_clips,
   4822                    &projection,
   4823                    stats,
   4824                );
   4825            }
   4826 
   4827            if has_clip_masks {
   4828                self.handle_clips(
   4829                    &draw_target,
   4830                    &target.clip_masks,
   4831                    &projection,
   4832                    stats,
   4833                );
   4834            }
   4835        }
   4836 
   4837        if needs_depth {
   4838            self.device.invalidate_depth_target();
   4839        }
   4840        if self.device.get_capabilities().supports_qcom_tiled_rendering {
   4841            self.device.gl().end_tiling_qcom(gl::COLOR_BUFFER_BIT0_QCOM);
   4842        }
   4843 
   4844        if let Some(sampler) = sampler_query {
   4845            self.gpu_profiler.finish_sampler(sampler);
   4846        }
   4847    }
   4848 
   4849    fn draw_blurs(
   4850        &mut self,
   4851        blurs: &FastHashMap<TextureSource, FrameVec<BlurInstance>>,
   4852        stats: &mut RendererStats,
   4853    ) {
   4854        for (texture, blurs) in blurs {
   4855            let textures = BatchTextures::composite_rgb(
   4856                *texture,
   4857            );
   4858 
   4859            self.draw_instanced_batch(
   4860                blurs,
   4861                VertexArrayKind::Blur,
   4862                &textures,
   4863                stats,
   4864            );
   4865        }
   4866    }
   4867 
    /// Draw all the instances in a clip batcher list to the current target.
    ///
    /// Issues up to three groups of draws, each with its own shader:
    /// slow-path rounded rectangles, fast-path rectangles, and box-shadow
    /// clips (the latter batched per mask texture). The caller is
    /// responsible for configuring blend state before calling this.
    fn draw_clip_batch_list(
        &mut self,
        list: &ClipBatchList,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        // Debug flag to skip all clip mask rendering entirely.
        if self.debug_flags.contains(DebugFlags::DISABLE_CLIP_MASKS) {
            return;
        }

        // draw rounded cornered rectangles (full-featured, slow-path shader)
        if !list.slow_rectangles.is_empty() {
            let _gm2 = self.gpu_profiler.start_marker("slow clip rectangles");
            self.shaders.borrow_mut().cs_clip_rectangle_slow().bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors,
                &mut self.profile,
                &mut self.command_log,
            );
            self.draw_instanced_batch(
                &list.slow_rectangles,
                VertexArrayKind::ClipRect,
                &BatchTextures::empty(),
                stats,
            );
        }
        // Rectangles that qualify for the cheaper fast-path shader.
        if !list.fast_rectangles.is_empty() {
            let _gm2 = self.gpu_profiler.start_marker("fast clip rectangles");
            self.shaders.borrow_mut().cs_clip_rectangle_fast().bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors,
                &mut self.profile,
                &mut self.command_log,
            );
            self.draw_instanced_batch(
                &list.fast_rectangles,
                VertexArrayKind::ClipRect,
                &BatchTextures::empty(),
                stats,
            );
        }

        // draw box-shadow clips, one batch per mask texture they sample from
        for (mask_texture_id, items) in list.box_shadows.iter() {
            let _gm2 = self.gpu_profiler.start_marker("box-shadows");
            let textures = BatchTextures::composite_rgb(*mask_texture_id);
            self.shaders.borrow_mut().cs_clip_box_shadow().bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors,
                &mut self.profile,
                &mut self.command_log,
            );
            self.draw_instanced_batch(
                items,
                VertexArrayKind::ClipBoxShadow,
                &textures,
                stats,
            );
        }
    }
   4935 
    /// Lock external (deferred-resolve) images via the external image
    /// handler and patch their UV rects into the GPU buffer.
    ///
    /// For every deferred resolve, the handler's `lock` callback supplies
    /// the native texture and UV rect; the texture is registered with the
    /// texture resolver (keyed by `DeferredResolveIndex`) so batches can
    /// resolve it, and the UV rect is written into `gpu_buffer` at the
    /// address referenced by the resolve's handle. The matching
    /// `unlock_external_images` must run after rendering the frame.
    fn update_deferred_resolves(
        &mut self,
        deferred_resolves: &[DeferredResolve],
        gpu_buffer: &mut GpuBufferF,
    ) {
        // The first thing we do is run through any pending deferred
        // resolves, and use a callback to get the UV rect for this
        // custom item. Then we patch the resource_rects structure
        // here before it's uploaded to the GPU.
        if deferred_resolves.is_empty() {
            return;
        }

        // Deferred resolves can only exist if an external image handler
        // was registered; its absence is a bug in the embedder.
        let handler = self.external_image_handler
            .as_mut()
            .expect("Found external image, but no handler set!");

        for (i, deferred_resolve) in deferred_resolves.iter().enumerate() {
            self.gpu_profiler.place_marker("deferred resolve");
            let props = &deferred_resolve.image_properties;
            let ext_image = props
                .external_image
                .expect("BUG: Deferred resolves must be external images!");
            // Provide rendering information for NativeTexture external images.
            let image = handler.lock(ext_image.id, ext_image.channel_index, deferred_resolve.is_composited);
            // Only texture-handle external images are valid here; buffer
            // images are not expected to reach deferred resolution.
            let texture_target = match ext_image.image_type {
                ExternalImageType::TextureHandle(target) => target,
                ExternalImageType::Buffer => {
                    panic!("not a suitable image type in update_deferred_resolves()");
                }
            };

            // In order to produce the handle, the external image handler may call into
            // the GL context and change some states.
            self.device.reset_state();

            let texture = match image.source {
                ExternalImageSource::NativeTexture(texture_id) => {
                    ExternalTexture::new(
                        texture_id,
                        texture_target,
                        image.uv,
                        deferred_resolve.rendering,
                    )
                }
                ExternalImageSource::Invalid => {
                    warn!("Invalid ext-image");
                    debug!(
                        "For ext_id:{:?}, channel:{}.",
                        ext_image.id,
                        ext_image.channel_index
                    );
                    // Just use 0 as the gl handle for this failed case.
                    ExternalTexture::new(
                        0,
                        texture_target,
                        image.uv,
                        deferred_resolve.rendering,
                    )
                }
                ExternalImageSource::RawData(_) => {
                    panic!("Raw external data is not expected for deferred resolves!");
                }
            };

            // Record the locked texture so that batches referencing this
            // deferred resolve can find it, and so it can be unlocked later.
            self.texture_resolver
                .external_images
                .insert(DeferredResolveIndex(i as u32), texture);

            // Patch the UV rect into the slot reserved by the handle; the
            // second slot is zeroed.
            let addr = gpu_buffer.resolve_handle(deferred_resolve.handle);
            let index = addr.as_u32() as usize;
            gpu_buffer.data[index] = image.uv.to_array().into();
            gpu_buffer.data[index + 1] = [0f32; 4].into();
        }
    }
   5011 
   5012    fn unlock_external_images(
   5013        &mut self,
   5014        deferred_resolves: &[DeferredResolve],
   5015    ) {
   5016        if !self.texture_resolver.external_images.is_empty() {
   5017            let handler = self.external_image_handler
   5018                .as_mut()
   5019                .expect("Found external image, but no handler set!");
   5020 
   5021            for (index, _) in self.texture_resolver.external_images.drain() {
   5022                let props = &deferred_resolves[index.0 as usize].image_properties;
   5023                let ext_image = props
   5024                    .external_image
   5025                    .expect("BUG: Deferred resolves must be external images!");
   5026                handler.unlock(ext_image.id, ext_image.channel_index);
   5027            }
   5028        }
   5029    }
   5030 
    /// Update the dirty rects based on current compositing mode and config.
    ///
    /// Returns `Some(PartialPresentMode)` when this frame can be partially
    /// presented, or `None` when the whole framebuffer must be redrawn (or
    /// when a layer compositor computes its own dirty rects). This frame's
    /// dirty rects are also pushed into `results.dirty_rects`.
    // TODO(gw): This can be tidied up significantly once the Draw compositor
    //           is implemented in terms of the compositor trait.
    fn calculate_dirty_rects(
        &mut self,
        buffer_age: usize,
        composite_state: &CompositeState,
        draw_target_dimensions: DeviceIntSize,
        results: &mut RenderResults,
    ) -> Option<PartialPresentMode> {

        if let Some(ref _compositor) = self.compositor_config.layer_compositor() {
            // Calculate dirty rects of layer compositor in composite_simple()
            return None;
        }

        let mut partial_present_mode = None;

        let (max_partial_present_rects, draw_previous_partial_present_regions) = match self.current_compositor_kind {
            CompositorKind::Native { .. } => {
                // Assume that we can return a single dirty rect for native
                // compositor for now, and that there is no buffer-age functionality.
                // These params can be exposed by the compositor capabilities struct
                // as the Draw compositor is ported to use it.
                (1, false)
            }
            CompositorKind::Draw { draw_previous_partial_present_regions, max_partial_present_rects } => {
                (max_partial_present_rects, draw_previous_partial_present_regions)
            }
            CompositorKind::Layer { .. } => {
                // Handled by the early return above.
                unreachable!();
            }
        };

        if max_partial_present_rects > 0 {
            // Damage accumulated from previous frames. Falls back to the
            // full target rect when the tracker has no rect for this
            // buffer age.
            let prev_frames_damage_rect = if let Some(..) = self.compositor_config.partial_present() {
                self.buffer_damage_tracker
                    .get_damage_rect(buffer_age)
                    .or_else(|| Some(DeviceRect::from_size(draw_target_dimensions.to_f32())))
            } else {
                None
            };

            // Partial present is only usable when the dirty rects are
            // trustworthy, no full redraw was forced, any required previous
            // damage is known, and the debug overlay is off.
            let can_use_partial_present =
                composite_state.dirty_rects_are_valid &&
                !self.force_redraw &&
                !(prev_frames_damage_rect.is_none() && draw_previous_partial_present_regions) &&
                !self.debug_overlay_state.is_enabled;

            if can_use_partial_present {
                let mut combined_dirty_rect = DeviceRect::zero();
                let fb_rect = DeviceRect::from_size(draw_target_dimensions.to_f32());

                // Work out how many dirty rects WR produced, and if that's more than
                // what the device supports.
                for tile in &composite_state.tiles {
                    let dirty_rect = composite_state.get_device_rect(
                        &tile.local_dirty_rect,
                        tile.transform_index,
                    );

                    // In pathological cases where a tile is extremely zoomed, it
                    // may end up with device coords outside the range of an i32,
                    // so clamp it to the frame buffer rect here, before it gets
                    // casted to an i32 rect below.
                    if let Some(dirty_rect) = dirty_rect.intersection(&fb_rect) {
                        combined_dirty_rect = combined_dirty_rect.union(&dirty_rect);
                    }
                }

                let combined_dirty_rect = combined_dirty_rect.round();
                let combined_dirty_rect_i32 = combined_dirty_rect.to_i32();
                // Return this frame's dirty region. If nothing has changed, don't return any dirty
                // rects at all (the client can use this as a signal to skip present completely).
                if !combined_dirty_rect.is_empty() {
                    results.dirty_rects.push(combined_dirty_rect_i32);
                }

                // Track this frame's dirty region, for calculating subsequent frames' damage.
                if draw_previous_partial_present_regions {
                    self.buffer_damage_tracker.push_dirty_rect(&combined_dirty_rect);
                }

                // If the implementation requires manually keeping the buffer consistent,
                // then we must combine this frame's dirty region with that of previous frames
                // to determine the total_dirty_rect. This is used to determine what region we
                // render to, and is what we send to the compositor as the buffer damage region
                // (eg for KHR_partial_update).
                let total_dirty_rect = if draw_previous_partial_present_regions {
                    combined_dirty_rect.union(&prev_frames_damage_rect.unwrap())
                } else {
                    combined_dirty_rect
                };

                partial_present_mode = Some(PartialPresentMode::Single {
                    dirty_rect: total_dirty_rect,
                });
            } else {
                // If we don't have a valid partial present scenario, return a single
                // dirty rect to the client that covers the entire framebuffer.
                let fb_rect = DeviceIntRect::from_size(
                    draw_target_dimensions,
                );
                results.dirty_rects.push(fb_rect);

                if draw_previous_partial_present_regions {
                    self.buffer_damage_tracker.push_dirty_rect(&fb_rect.to_f32());
                }
            }
        }

        partial_present_mode
    }
   5144 
   5145    fn bind_frame_data(&mut self, frame: &mut Frame) {
   5146        profile_scope!("bind_frame_data");
   5147 
   5148        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_DATA);
   5149 
   5150        self.vertex_data_textures[self.current_vertex_data_textures].update(
   5151            &mut self.device,
   5152            &mut self.texture_upload_pbo_pool,
   5153            frame,
   5154        );
   5155        self.current_vertex_data_textures =
   5156            (self.current_vertex_data_textures + 1) % VERTEX_DATA_TEXTURE_COUNT;
   5157 
   5158        if let Some(texture) = &self.gpu_buffer_texture_f {
   5159            self.device.bind_texture(
   5160                TextureSampler::GpuBufferF,
   5161                &texture,
   5162                Swizzle::default(),
   5163            );
   5164        }
   5165 
   5166        if let Some(texture) = &self.gpu_buffer_texture_i {
   5167            self.device.bind_texture(
   5168                TextureSampler::GpuBufferI,
   5169                &texture,
   5170                Swizzle::default(),
   5171            );
   5172        }
   5173    }
   5174 
    /// Apply all pending native surface operations (create/destroy surfaces
    /// and tiles, attach external images) to the native OS compositor.
    ///
    /// In Draw/Layer compositing modes the pending list must stay empty,
    /// since nothing drains it there.
    fn update_native_surfaces(&mut self) {
        profile_scope!("update_native_surfaces");

        match self.compositor_config {
            CompositorConfig::Native { ref mut compositor, .. } => {
                for op in self.pending_native_surface_updates.drain(..) {
                    match op.details {
                        // Tiled surface backed by virtual address space.
                        NativeSurfaceOperationDetails::CreateSurface { id, virtual_offset, tile_size, is_opaque } => {
                            let _inserted = self.allocated_native_surfaces.insert(id);
                            debug_assert!(_inserted, "bug: creating existing surface");
                            compositor.create_surface(
                                    &mut self.device,
                                    id,
                                    virtual_offset,
                                    tile_size,
                                    is_opaque,
                            );
                        }
                        // Surface whose content comes from an external image.
                        NativeSurfaceOperationDetails::CreateExternalSurface { id, is_opaque } => {
                            let _inserted = self.allocated_native_surfaces.insert(id);
                            debug_assert!(_inserted, "bug: creating existing surface");
                            compositor.create_external_surface(
                                &mut self.device,
                                id,
                                is_opaque,
                            );
                        }
                        // Solid-color backdrop surface.
                        NativeSurfaceOperationDetails::CreateBackdropSurface { id, color } => {
                            let _inserted = self.allocated_native_surfaces.insert(id);
                            debug_assert!(_inserted, "bug: creating existing surface");
                            compositor.create_backdrop_surface(
                                &mut self.device,
                                id,
                                color,
                            );
                        }
                        NativeSurfaceOperationDetails::DestroySurface { id } => {
                            let _existed = self.allocated_native_surfaces.remove(&id);
                            debug_assert!(_existed, "bug: removing unknown surface");
                            compositor.destroy_surface(&mut self.device, id);
                        }
                        NativeSurfaceOperationDetails::CreateTile { id } => {
                            compositor.create_tile(&mut self.device, id);
                        }
                        NativeSurfaceOperationDetails::DestroyTile { id } => {
                            compositor.destroy_tile(&mut self.device, id);
                        }
                        NativeSurfaceOperationDetails::AttachExternalImage { id, external_image } => {
                            compositor.attach_external_image(&mut self.device, id, external_image);
                        }
                    }
                }
            }
            CompositorConfig::Draw { .. } | CompositorConfig::Layer { .. } => {
                // Ensure nothing is added in simple composite mode, since otherwise
                // memory will leak as this doesn't get drained
                debug_assert!(self.pending_native_surface_updates.is_empty());
            }
        }
    }
   5235 
   5236    fn update_gpu_buffer_texture<T: Texel>(
   5237        device: &mut Device,
   5238        buffer: &GpuBuffer<T>,
   5239        dst_texture: &mut Option<Texture>,
   5240        pbo_pool: &mut UploadPBOPool,
   5241    ) {
   5242        if buffer.is_empty() {
   5243            return;
   5244        }
   5245 
   5246        if let Some(texture) = dst_texture {
   5247            assert!(texture.get_dimensions().width == buffer.size.width);
   5248            if texture.get_dimensions().height < buffer.size.height {
   5249                device.delete_texture(dst_texture.take().unwrap());
   5250            }
   5251        }
   5252 
   5253        if dst_texture.is_none() {
   5254            let height = ((buffer.size.height + 7) & !7).max(8);
   5255            assert!(height >= buffer.size.height);
   5256            *dst_texture = Some(
   5257                device.create_texture(
   5258                    ImageBufferKind::Texture2D,
   5259                    buffer.format,
   5260                    buffer.size.width,
   5261                    height,
   5262                    TextureFilter::Nearest,
   5263                    None,
   5264                )
   5265            );
   5266        }
   5267 
   5268        let mut uploader = device.upload_texture(pbo_pool);
   5269 
   5270        uploader.upload(
   5271            device,
   5272            dst_texture.as_mut().unwrap(),
   5273            DeviceIntRect {
   5274                min: DeviceIntPoint::zero(),
   5275                max: DeviceIntPoint::new(buffer.size.width, buffer.size.height),
   5276            },
   5277            None,
   5278            None,
   5279            buffer.data.as_ptr(),
   5280            buffer.data.len(),
   5281        );
   5282 
   5283        uploader.flush(device);
   5284    }
   5285 
   5286    fn maybe_evict_gpu_buffer_texture(
   5287        device: &mut Device,
   5288        gpu_buffer_height: i32,
   5289        texture: &mut Option<Texture>,
   5290        texture_too_large: &mut i32,
   5291    ) {
   5292        if let Some(tex) = texture {
   5293            if tex.get_dimensions().height > gpu_buffer_height * 2 {
   5294                *texture_too_large += 1;
   5295            } else {
   5296                *texture_too_large = 0;
   5297            }
   5298        }
   5299 
   5300        // Delete the texture if it has been too large for 10 frames
   5301        // or more.
   5302        if *texture_too_large > 10 {
   5303            device.delete_texture(texture.take().unwrap());
   5304            *texture_too_large = 0;
   5305        }
   5306    }
   5307 
    /// Draws a built `Frame` by executing each of its render passes in order,
    /// then compositing the result.
    ///
    /// * `device_size` - `None` means no composite to the main framebuffer
    ///   will occur; only picture cache and texture cache targets are updated.
    /// * `buffer_age` - used to calculate partial-present dirty rects.
    /// * `results` - accumulates stats, dirty rects and debug info.
    ///
    /// Sets `frame.has_been_rendered`, so re-drawing the same frame skips the
    /// texture/picture cache targets that were already updated.
    fn draw_frame(
        &mut self,
        frame: &mut Frame,
        device_size: Option<DeviceIntSize>,
        buffer_age: usize,
        results: &mut RenderResults,
    ) {
        profile_scope!("draw_frame");

        // These markers seem to crash a lot on Android, see bug 1559834
        #[cfg(not(target_os = "android"))]
        let _gm = self.gpu_profiler.start_marker("draw frame");

        // Nothing to draw; still mark the frame rendered so callers don't retry.
        if frame.passes.is_empty() {
            frame.has_been_rendered = true;
            return;
        }

        // Upload this frame's float and int GPU buffer data into their textures.
        {
            let _gm = self.gpu_profiler.start_marker("gpu buffer update");

            Self::update_gpu_buffer_texture(
                &mut self.device,
                &frame.gpu_buffer_f,
                &mut self.gpu_buffer_texture_f,
                &mut self.texture_upload_pbo_pool,
            );
            Self::update_gpu_buffer_texture(
                &mut self.device,
                &frame.gpu_buffer_i,
                &mut self.gpu_buffer_texture_i,
                &mut self.texture_upload_pbo_pool,
            );
        }

        // Reset GL state to the defaults the passes below expect.
        self.device.disable_depth_write();
        self.set_blend(false, FramebufferKind::Other);
        self.device.disable_stencil();

        self.bind_frame_data(frame);

        // Report GPU buffer memory to the profiler.
        // NOTE(review): the 16.0 factor presumably means 16 bytes per texel
        // (4 x 32-bit components) for both buffers — confirm against the
        // GPU buffer texture formats.
        let bytes_to_mb = 1.0 / 1000000.0;
        let gpu_buffer_bytes_f = frame.gpu_buffer_f.size.to_f32().area() * 16.0;
        let gpu_buffer_bytes_i = frame.gpu_buffer_i.size.to_f32().area() * 16.0;
        let gpu_buffer_mb = (gpu_buffer_bytes_f + gpu_buffer_bytes_i) as f32 * bytes_to_mb;
        self.profile.set(profiler::GPU_BUFFER_MEM, gpu_buffer_mb);

        // Determine the present mode and dirty rects, if device_size
        // is Some(..). If it's None, no composite will occur and only
        // picture cache and texture cache targets will be updated.
        // TODO(gw): Split Frame so that it's clearer when a composite
        //           is occurring.
        let present_mode = device_size.and_then(|device_size| {
            self.calculate_dirty_rects(
                buffer_age,
                &frame.composite_state,
                device_size,
                results,
            )
        });

        // If we have a native OS compositor, then make use of that interface to
        // specify how to composite each of the picture cache surfaces. First, we
        // need to find each tile that may be bound and updated later in the frame
        // and invalidate it so that the native render compositor knows that these
        // tiles can't be composited early. Next, after all such tiles have been
        // invalidated, then we queue surfaces for native composition by the render
        // compositor before we actually update the tiles. This allows the render
        // compositor to start early composition while the tiles are updating.
        if let CompositorKind::Native { .. } = self.current_compositor_kind {
            let compositor = self.compositor_config.compositor().unwrap();
            // Invalidate any native surface tiles that might be updated by passes.
            if !frame.has_been_rendered {
                for tile in &frame.composite_state.tiles {
                    if !tile.local_dirty_rect.is_empty() {
                        if let CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::Native { id, .. } } = tile.surface {
                            let valid_rect = frame.composite_state.get_surface_rect(
                                &tile.local_valid_rect,
                                &tile.local_rect,
                                tile.transform_index,
                            ).to_i32();

                            compositor.invalidate_tile(&mut self.device, id, valid_rect);
                        }
                    }
                }
            }
            // Ensure any external surfaces that might be used during early composition
            // are invalidated first so that the native compositor can properly schedule
            // composition to happen only when the external surface is updated.
            // See update_external_native_surfaces for more details.
            for surface in &frame.composite_state.external_surfaces {
                if let Some((native_surface_id, size)) = surface.update_params {
                    let surface_rect = size.into();
                    compositor.invalidate_tile(&mut self.device, NativeTileId { surface_id: native_surface_id, x: 0, y: 0 }, surface_rect);
                }
            }
            // Finally queue native surfaces for early composition, if applicable. By now,
            // we have already invalidated any tiles that such surfaces may depend upon, so
            // the native render compositor can keep track of when to actually schedule
            // composition as surfaces are updated.
            if device_size.is_some() {
                frame.composite_state.composite_native(
                    self.clear_color,
                    &results.dirty_rects,
                    &mut self.device,
                    &mut **compositor,
                );
            }
        }

        // Execute each render pass in order. `_pass_index` is only read by the
        // GPU profiler marker, which is compiled out on Android.
        for (_pass_index, pass) in frame.passes.iter_mut().enumerate() {
            #[cfg(not(target_os = "android"))]
            let _gm = self.gpu_profiler.start_marker(&format!("pass {}", _pass_index));

            profile_scope!("offscreen target");

            // If this frame has already been drawn, then any texture
            // cache targets have already been updated and can be
            // skipped this time.
            if !frame.has_been_rendered {
                for (&texture_id, target) in &pass.texture_cache {
                    self.draw_render_target(
                        texture_id,
                        target,
                        &frame.render_tasks,
                        &mut results.stats,
                    );
                }

                if !pass.picture_cache.is_empty() {
                    self.profile.inc(profiler::COLOR_PASSES);
                }

                // Draw picture caching tiles for this pass.
                for picture_target in &pass.picture_cache {
                    results.stats.color_target_count += 1;

                    // Resolve the surface to draw into: either a texture cache
                    // entry, or a bound native OS compositor surface.
                    let draw_target = match picture_target.surface {
                        ResolvedSurfaceTexture::TextureCache { ref texture } => {
                            let (texture, _) = self.texture_resolver
                                .resolve(texture)
                                .expect("bug");

                            DrawTarget::from_texture(
                                texture,
                                true,
                            )
                        }
                        ResolvedSurfaceTexture::Native { id, size } => {
                            let surface_info = match self.current_compositor_kind {
                                CompositorKind::Native { .. } => {
                                    let compositor = self.compositor_config.compositor().unwrap();
                                    compositor.bind(
                                        &mut self.device,
                                        id,
                                        picture_target.dirty_rect,
                                        picture_target.valid_rect,
                                    )
                                }
                                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
                                    unreachable!();
                                }
                            };

                            DrawTarget::NativeSurface {
                                offset: surface_info.origin,
                                external_fbo_id: surface_info.fbo_id,
                                dimensions: size,
                            }
                        }
                    };

                    let projection = Transform3D::ortho(
                        0.0,
                        draw_target.dimensions().width as f32,
                        0.0,
                        draw_target.dimensions().height as f32,
                        self.device.ortho_near_plane(),
                        self.device.ortho_far_plane(),
                    );

                    self.draw_picture_cache_target(
                        picture_target,
                        draw_target,
                        &projection,
                        &frame.render_tasks,
                        &mut results.stats,
                    );

                    // Native OS surfaces must be unbound at the end of drawing to them
                    if let ResolvedSurfaceTexture::Native { .. } = picture_target.surface {
                        match self.current_compositor_kind {
                            CompositorKind::Native { .. } => {
                                let compositor = self.compositor_config.compositor().unwrap();
                                compositor.unbind(&mut self.device);
                            }
                            CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
                                unreachable!();
                            }
                        }
                    }
                }
            }

            for target in &pass.alpha.targets {
                results.stats.alpha_target_count += 1;
                self.draw_render_target(
                    target.texture_id(),
                    target,
                    &frame.render_tasks,
                    &mut results.stats,
                );
            }

            for target in &pass.color.targets {
                results.stats.color_target_count += 1;
                self.draw_render_target(
                    target.texture_id(),
                    target,
                    &frame.render_tasks,
                    &mut results.stats,
                );
            }

            // Only end the pass here and invalidate previous textures for
            // off-screen targets. Deferring return of the inputs to the
            // frame buffer until the implicit end_pass in end_frame allows
            // debug draw overlays to be added without triggering a copy
            // resolve stage in mobile / tiled GPUs.
            self.texture_resolver.end_pass(
                &mut self.device,
                &pass.textures_to_invalidate,
            );
        }

        // Composite to the main framebuffer / native compositor (no-op present
        // when device_size is None — see composite_frame).
        self.composite_frame(
            frame,
            device_size,
            results,
            present_mode,
        );

        frame.has_been_rendered = true;

        // Shrink the GPU buffer textures again if they have been oversized
        // relative to this frame's needs for several frames.
        Self::maybe_evict_gpu_buffer_texture(
            &mut self.device,
            frame.gpu_buffer_f.size.height,
            &mut self.gpu_buffer_texture_f,
            &mut self.gpu_buffer_texture_f_too_large,
        );

        Self::maybe_evict_gpu_buffer_texture(
            &mut self.device,
            frame.gpu_buffer_i.size.height,
            &mut self.gpu_buffer_texture_i,
            &mut self.gpu_buffer_texture_i_too_large,
        );
    }
   5567 
    /// Composites the frame to the main framebuffer, or — for a native OS
    /// compositor — finishes updating the external surfaces it composites.
    ///
    /// When `device_size` is `None` nothing is presented; `force_redraw` is
    /// set so that the next presented frame performs a full (non-partial)
    /// present rather than relying on stale dirty-rect state.
    fn composite_frame(
        &mut self,
        frame: &mut Frame,
        device_size: Option<DeviceIntSize>,
        results: &mut RenderResults,
        present_mode: Option<PartialPresentMode>,
    ) {
        profile_scope!("main target");
        if let Some(device_size) = device_size {
            if let Some(history) = &mut self.command_log {
                history.begin_render_target("Window", device_size);
            }

            results.stats.color_target_count += 1;
            // Hand the per-tile debug info for this frame over to the caller.
            results.picture_cache_debug = mem::replace(
                &mut frame.composite_state.picture_cache_debug,
                PictureCacheDebugInfo::new(),
            );

            // Build a projection that flips Y when the surface origin is
            // bottom-left (e.g. default GL framebuffer).
            let size = frame.device_rect.size().to_f32();
            let surface_origin_is_top_left = self.device.surface_origin_is_top_left();
            let (bottom, top) = if surface_origin_is_top_left {
              (0.0, size.height)
            } else {
              (size.height, 0.0)
            };

            let projection = Transform3D::ortho(
                0.0,
                size.width,
                bottom,
                top,
                self.device.ortho_near_plane(),
                self.device.ortho_far_plane(),
            );

            // 1:1 scale — only re-tags device pixels as framebuffer pixels.
            let fb_scale = Scale::<_, _, FramebufferPixel>::new(1i32);
            let mut fb_rect = frame.device_rect * fb_scale;

            // Flip the rect vertically for bottom-left-origin surfaces.
            if !surface_origin_is_top_left {
                let h = fb_rect.height();
                fb_rect.min.y = device_size.height - fb_rect.max.y;
                fb_rect.max.y = fb_rect.min.y + h;
            }

            let draw_target = DrawTarget::Default {
                rect: fb_rect,
                total_size: device_size * fb_scale,
                surface_origin_is_top_left,
            };

            // If we have a native OS compositor, then make use of that interface
            // to specify how to composite each of the picture cache surfaces.
            match self.current_compositor_kind {
                CompositorKind::Native { .. } => {
                    // We have already queued surfaces for early native composition by this point.
                    // All that is left is to finally update any external native surfaces that were
                    // invalidated so that composition can complete.
                    self.update_external_native_surfaces(
                        &frame.composite_state.external_surfaces,
                        results,
                    );
                }
                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
                    self.composite_simple(
                        &frame.composite_state,
                        frame.device_rect.size(),
                        draw_target,
                        &projection,
                        results,
                        present_mode,
                        device_size,
                    );
                }
            }
            // Reset force_redraw. It was used in composite_simple() with layer compositor.
            self.force_redraw = false;
        } else {
            // Rendering a frame without presenting it will confuse the partial
            // present logic, so force a full present for the next frame.
            self.force_redraw = true;
        }
    }
   5651 
    /// Returns the debug overlay renderer, if one is available.
    ///
    /// Delegates to `self.debug.get_mut`, which requires mutable device access.
    pub fn debug_renderer(&mut self) -> Option<&mut DebugRenderer> {
        self.debug.get_mut(&mut self.device)
    }
   5655 
    /// Returns the currently active set of debug flags.
    pub fn get_debug_flags(&self) -> DebugFlags {
        self.debug_flags
    }
   5659 
   5660    pub fn set_debug_flags(&mut self, flags: DebugFlags) {
   5661        if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_TIME_QUERIES) {
   5662            if enabled {
   5663                self.gpu_profiler.enable_timers();
   5664            } else {
   5665                self.gpu_profiler.disable_timers();
   5666            }
   5667        }
   5668        if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_SAMPLE_QUERIES) {
   5669            if enabled {
   5670                self.gpu_profiler.enable_samplers();
   5671            } else {
   5672                self.gpu_profiler.disable_samplers();
   5673            }
   5674        }
   5675 
   5676        self.debug_flags = flags;
   5677    }
   5678 
    /// Configures the profiler overlay UI; forwards `ui_str` to
    /// `Profiler::set_ui`.
    pub fn set_profiler_ui(&mut self, ui_str: &str) {
        self.profiler.set_ui(ui_str);
    }
   5682 
   5683    fn draw_frame_debug_items(&mut self, items: &[DebugItem]) {
   5684        if items.is_empty() {
   5685            return;
   5686        }
   5687 
   5688        let debug_renderer = match self.debug.get_mut(&mut self.device) {
   5689            Some(render) => render,
   5690            None => return,
   5691        };
   5692 
   5693        for item in items {
   5694            match item {
   5695                DebugItem::Rect { rect, outer_color, inner_color, thickness } => {
   5696                    if inner_color.a > 0.001 {
   5697                        let rect = rect.inflate(-thickness as f32, -thickness as f32);
   5698                        debug_renderer.add_quad(
   5699                            rect.min.x,
   5700                            rect.min.y,
   5701                            rect.max.x,
   5702                            rect.max.y,
   5703                            (*inner_color).into(),
   5704                            (*inner_color).into(),
   5705                        );
   5706                    }
   5707 
   5708                    if outer_color.a > 0.001 {
   5709                        debug_renderer.add_rect(
   5710                            &rect.to_i32(),
   5711                            *thickness,
   5712                            (*outer_color).into(),
   5713                        );
   5714                    }
   5715                }
   5716                DebugItem::Text { ref msg, position, color } => {
   5717                    debug_renderer.add_text(
   5718                        position.x,
   5719                        position.y,
   5720                        msg,
   5721                        (*color).into(),
   5722                        None,
   5723                    );
   5724                }
   5725            }
   5726        }
   5727    }
   5728 
   5729    fn draw_render_target_debug(&mut self, draw_target: &DrawTarget) {
   5730        if !self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) {
   5731            return;
   5732        }
   5733 
   5734        let debug_renderer = match self.debug.get_mut(&mut self.device) {
   5735            Some(render) => render,
   5736            None => return,
   5737        };
   5738 
   5739        let textures = self.texture_resolver
   5740            .texture_cache_map
   5741            .values()
   5742            .filter(|item| item.category == TextureCacheCategory::RenderTarget)
   5743            .map(|item| &item.texture)
   5744            .collect::<Vec<&Texture>>();
   5745 
   5746        Self::do_debug_blit(
   5747            &mut self.device,
   5748            debug_renderer,
   5749            textures,
   5750            draw_target,
   5751            0,
   5752            &|_| [0.0, 1.0, 0.0, 1.0], // Use green for all RTs.
   5753        );
   5754    }
   5755 
   5756    fn draw_zoom_debug(
   5757        &mut self,
   5758        device_size: DeviceIntSize,
   5759    ) {
   5760        if !self.debug_flags.contains(DebugFlags::ZOOM_DBG) {
   5761            return;
   5762        }
   5763 
   5764        let debug_renderer = match self.debug.get_mut(&mut self.device) {
   5765            Some(render) => render,
   5766            None => return,
   5767        };
   5768 
   5769        let source_size = DeviceIntSize::new(64, 64);
   5770        let target_size = DeviceIntSize::new(1024, 1024);
   5771 
   5772        let source_origin = DeviceIntPoint::new(
   5773            (self.cursor_position.x - source_size.width / 2)
   5774                .min(device_size.width - source_size.width)
   5775                .max(0),
   5776            (self.cursor_position.y - source_size.height / 2)
   5777                .min(device_size.height - source_size.height)
   5778                .max(0),
   5779        );
   5780 
   5781        let source_rect = DeviceIntRect::from_origin_and_size(
   5782            source_origin,
   5783            source_size,
   5784        );
   5785 
   5786        let target_rect = DeviceIntRect::from_origin_and_size(
   5787            DeviceIntPoint::new(
   5788                device_size.width - target_size.width - 64,
   5789                device_size.height - target_size.height - 64,
   5790            ),
   5791            target_size,
   5792        );
   5793 
   5794        let texture_rect = FramebufferIntRect::from_size(
   5795            source_rect.size().cast_unit(),
   5796        );
   5797 
   5798        debug_renderer.add_rect(
   5799            &target_rect.inflate(1, 1),
   5800            1,
   5801            debug_colors::RED.into(),
   5802        );
   5803 
   5804        if self.zoom_debug_texture.is_none() {
   5805            let texture = self.device.create_texture(
   5806                ImageBufferKind::Texture2D,
   5807                ImageFormat::BGRA8,
   5808                source_rect.width(),
   5809                source_rect.height(),
   5810                TextureFilter::Nearest,
   5811                Some(RenderTargetInfo { has_depth: false }),
   5812            );
   5813 
   5814            self.zoom_debug_texture = Some(texture);
   5815        }
   5816 
   5817        // Copy frame buffer into the zoom texture
   5818        let read_target = DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left());
   5819        self.device.blit_render_target(
   5820            read_target.into(),
   5821            read_target.to_framebuffer_rect(source_rect),
   5822            DrawTarget::from_texture(
   5823                self.zoom_debug_texture.as_ref().unwrap(),
   5824                false,
   5825            ),
   5826            texture_rect,
   5827            TextureFilter::Nearest,
   5828        );
   5829 
   5830        // Draw the zoom texture back to the framebuffer
   5831        self.device.blit_render_target(
   5832            ReadTarget::from_texture(
   5833                self.zoom_debug_texture.as_ref().unwrap(),
   5834            ),
   5835            texture_rect,
   5836            read_target,
   5837            read_target.to_framebuffer_rect(target_rect),
   5838            TextureFilter::Nearest,
   5839        );
   5840    }
   5841 
   5842    fn draw_texture_cache_debug(&mut self, draw_target: &DrawTarget) {
   5843        if !self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
   5844            return;
   5845        }
   5846 
   5847        let debug_renderer = match self.debug.get_mut(&mut self.device) {
   5848            Some(render) => render,
   5849            None => return,
   5850        };
   5851 
   5852        let textures = self.texture_resolver
   5853            .texture_cache_map
   5854            .values()
   5855            .filter(|item| item.category == TextureCacheCategory::Atlas)
   5856            .map(|item| &item.texture)
   5857            .collect::<Vec<&Texture>>();
   5858 
   5859        fn select_color(texture: &Texture) -> [f32; 4] {
   5860            if texture.flags().contains(TextureFlags::IS_SHARED_TEXTURE_CACHE) {
   5861                [1.0, 0.5, 0.0, 1.0] // Orange for shared.
   5862            } else {
   5863                [1.0, 0.0, 1.0, 1.0] // Fuchsia for standalone.
   5864            }
   5865        }
   5866 
   5867        Self::do_debug_blit(
   5868            &mut self.device,
   5869            debug_renderer,
   5870            textures,
   5871            draw_target,
   5872            if self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) { 544 } else { 0 },
   5873            &select_color,
   5874        );
   5875    }
   5876 
   5877    fn do_debug_blit(
   5878        device: &mut Device,
   5879        debug_renderer: &mut DebugRenderer,
   5880        mut textures: Vec<&Texture>,
   5881        draw_target: &DrawTarget,
   5882        bottom: i32,
   5883        select_color: &dyn Fn(&Texture) -> [f32; 4],
   5884    ) {
   5885        let mut spacing = 16;
   5886        let mut size = 512;
   5887 
   5888        let device_size = draw_target.dimensions();
   5889        let fb_width = device_size.width;
   5890        let fb_height = device_size.height;
   5891        let surface_origin_is_top_left = draw_target.surface_origin_is_top_left();
   5892 
   5893        let num_textures = textures.len() as i32;
   5894 
   5895        if num_textures * (size + spacing) > fb_width {
   5896            let factor = fb_width as f32 / (num_textures * (size + spacing)) as f32;
   5897            size = (size as f32 * factor) as i32;
   5898            spacing = (spacing as f32 * factor) as i32;
   5899        }
   5900 
   5901        let text_height = 14; // Visually approximated.
   5902        let text_margin = 1;
   5903        let tag_height = text_height + text_margin * 2;
   5904        let tag_y = fb_height - (bottom + spacing + tag_height);
   5905        let image_y = tag_y - size;
   5906 
   5907        // Sort the display by size (in bytes), so that left-to-right is
   5908        // largest-to-smallest.
   5909        //
   5910        // Note that the vec here is in increasing order, because the elements
   5911        // get drawn right-to-left.
   5912        textures.sort_by_key(|t| t.size_in_bytes());
   5913 
   5914        let mut i = 0;
   5915        for texture in textures.iter() {
   5916            let dimensions = texture.get_dimensions();
   5917            let src_rect = FramebufferIntRect::from_size(
   5918                FramebufferIntSize::new(dimensions.width as i32, dimensions.height as i32),
   5919            );
   5920 
   5921            let x = fb_width - (spacing + size) * (i as i32 + 1);
   5922 
   5923            // If we have more targets than fit on one row in screen, just early exit.
   5924            if x > fb_width {
   5925                return;
   5926            }
   5927 
   5928            // Draw the info tag.
   5929            let tag_rect = rect(x, tag_y, size, tag_height).to_box2d();
   5930            let tag_color = select_color(texture);
   5931            device.clear_target(
   5932                Some(tag_color),
   5933                None,
   5934                Some(draw_target.to_framebuffer_rect(tag_rect)),
   5935            );
   5936 
   5937            // Draw the dimensions onto the tag.
   5938            let dim = texture.get_dimensions();
   5939            let text_rect = tag_rect.inflate(-text_margin, -text_margin);
   5940            debug_renderer.add_text(
   5941                text_rect.min.x as f32,
   5942                text_rect.max.y as f32, // Top-relative.
   5943                &format!("{}x{}", dim.width, dim.height),
   5944                ColorU::new(0, 0, 0, 255),
   5945                Some(tag_rect.to_f32())
   5946            );
   5947 
   5948            // Blit the contents of the texture.
   5949            let dest_rect = draw_target.to_framebuffer_rect(rect(x, image_y, size, size).to_box2d());
   5950            let read_target = ReadTarget::from_texture(texture);
   5951 
   5952            if surface_origin_is_top_left {
   5953                device.blit_render_target(
   5954                    read_target,
   5955                    src_rect,
   5956                    *draw_target,
   5957                    dest_rect,
   5958                    TextureFilter::Linear,
   5959                );
   5960            } else {
   5961                 // Invert y.
   5962                 device.blit_render_target_invert_y(
   5963                    read_target,
   5964                    src_rect,
   5965                    *draw_target,
   5966                    dest_rect,
   5967                );
   5968            }
   5969            i += 1;
   5970        }
   5971    }
   5972 
   5973    fn draw_epoch_debug(&mut self) {
   5974        if !self.debug_flags.contains(DebugFlags::EPOCHS) {
   5975            return;
   5976        }
   5977 
   5978        let debug_renderer = match self.debug.get_mut(&mut self.device) {
   5979            Some(render) => render,
   5980            None => return,
   5981        };
   5982 
   5983        let dy = debug_renderer.line_height();
   5984        let x0: f32 = 30.0;
   5985        let y0: f32 = 30.0;
   5986        let mut y = y0;
   5987        let mut text_width = 0.0;
   5988        for ((pipeline, document_id), epoch) in  &self.pipeline_info.epochs {
   5989            y += dy;
   5990            let w = debug_renderer.add_text(
   5991                x0, y,
   5992                &format!("({:?}, {:?}): {:?}", pipeline, document_id, epoch),
   5993                ColorU::new(255, 255, 0, 255),
   5994                None,
   5995            ).size.width;
   5996            text_width = f32::max(text_width, w);
   5997        }
   5998 
   5999        let margin = 10.0;
   6000        debug_renderer.add_quad(
   6001            x0 - margin,
   6002            y0 - margin,
   6003            x0 + text_width + margin,
   6004            y + margin,
   6005            ColorU::new(25, 25, 25, 200),
   6006            ColorU::new(51, 51, 51, 200),
   6007        );
   6008    }
   6009 
   6010    fn draw_window_visibility_debug(&mut self) {
   6011        if !self.debug_flags.contains(DebugFlags::WINDOW_VISIBILITY_DBG) {
   6012            return;
   6013        }
   6014 
   6015        let debug_renderer = match self.debug.get_mut(&mut self.device) {
   6016            Some(render) => render,
   6017            None => return,
   6018        };
   6019 
   6020        let x: f32 = 30.0;
   6021        let y: f32 = 40.0;
   6022 
   6023        if let CompositorConfig::Native { ref mut compositor, .. } = self.compositor_config {
   6024            let visibility = compositor.get_window_visibility(&mut self.device);
   6025            let color = if visibility.is_fully_occluded {
   6026                ColorU::new(255, 0, 0, 255)
   6027 
   6028            } else {
   6029                ColorU::new(0, 0, 255, 255)
   6030            };
   6031 
   6032            debug_renderer.add_text(
   6033                x, y,
   6034                &format!("{:?}", visibility),
   6035                color,
   6036                None,
   6037            );
   6038        }
   6039 
   6040 
   6041    }
   6042 
   6043    fn draw_external_composite_borders_debug(&mut self) {
   6044        if !self.debug_flags.contains(DebugFlags::EXTERNAL_COMPOSITE_BORDERS) {
   6045            return;
   6046        }
   6047 
   6048        let debug_renderer = match self.debug.get_mut(&mut self.device) {
   6049            Some(render) => render,
   6050            None => return,
   6051        };
   6052 
   6053        for item in &self.external_composite_debug_items {
   6054            match item {
   6055                DebugItem::Rect { rect, outer_color, inner_color: _, thickness } => {
   6056                    if outer_color.a > 0.001 {
   6057                        debug_renderer.add_rect(
   6058                            &rect.to_i32(),
   6059                            *thickness,
   6060                            (*outer_color).into(),
   6061                        );
   6062                    }
   6063                }
   6064                DebugItem::Text { .. } => {}
   6065            }
   6066        }
   6067    }
   6068 
    /// Pass-through to `Device::read_pixels_into`, used by Gecko's WR bindings.
    ///
    /// Reads `rect` back from the framebuffer in `format` into `output`;
    /// see `Device::read_pixels_into` for the exact buffer-size contract.
    pub fn read_pixels_into(&mut self, rect: FramebufferIntRect, format: ImageFormat, output: &mut [u8]) {
        self.device.read_pixels_into(rect, format, output);
    }
   6073 
   6074    pub fn read_pixels_rgba8(&mut self, rect: FramebufferIntRect) -> Vec<u8> {
   6075        let mut pixels = vec![0; (rect.area() * 4) as usize];
   6076        self.device.read_pixels_into(rect, ImageFormat::RGBA8, &mut pixels);
   6077        pixels
   6078    }
   6079 
    // De-initialize the Renderer safely, assuming the GL is still alive and active.
    //
    // Consumes self: the renderer is unusable afterwards. Tears down, in order:
    // native compositor surfaces, owned debug/dither/GPU-buffer textures,
    // vertex data textures, upload pools, the texture resolver, VAOs, the
    // debug renderer, shaders, async screenshot/recorder state, and finally
    // (feature-gated) the capture read FBO and replayed external textures.
    pub fn deinit(mut self) {
        //Note: this is a fake frame, only needed because texture deletion is required to happen inside a frame
        self.device.begin_frame();
        // If we are using a native compositor, ensure that any remaining native
        // surfaces are freed.
        if let CompositorConfig::Native { mut compositor, .. } = self.compositor_config {
            for id in self.allocated_native_surfaces.drain() {
                compositor.destroy_surface(&mut self.device, id);
            }
            // Destroy the debug overlay surface, if currently allocated.
            if self.debug_overlay_state.current_size.is_some() {
                compositor.destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
            }
            compositor.deinit(&mut self.device);
        }
        // Delete the textures this renderer owns directly, if they were ever
        // allocated (all of these are lazily created).
        if let Some(dither_matrix_texture) = self.dither_matrix_texture {
            self.device.delete_texture(dither_matrix_texture);
        }
        if let Some(zoom_debug_texture) = self.zoom_debug_texture {
            self.device.delete_texture(zoom_debug_texture);
        }
        if let Some(texture) = self.gpu_buffer_texture_f {
            self.device.delete_texture(texture);
        }
        if let Some(texture) = self.gpu_buffer_texture_i {
            self.device.delete_texture(texture);
        }
        for textures in self.vertex_data_textures.drain(..) {
            textures.deinit(&mut self.device);
        }
        self.texture_upload_pbo_pool.deinit(&mut self.device);
        self.staging_texture_pool.delete_textures(&mut self.device);
        self.texture_resolver.deinit(&mut self.device);
        self.vaos.deinit(&mut self.device);
        self.debug.deinit(&mut self.device);

        // Shaders are behind an Rc; they can only be deinitialized if we hold
        // the last reference. If another reference is still alive, the GPU
        // shader objects are not freed here.
        if let Ok(shaders) = Rc::try_unwrap(self.shaders) {
            shaders.into_inner().deinit(&mut self.device);
        }

        if let Some(async_screenshots) = self.async_screenshots.take() {
            async_screenshots.deinit(&mut self.device);
        }

        if let Some(async_frame_recorder) = self.async_frame_recorder.take() {
            async_frame_recorder.deinit(&mut self.device);
        }

        // Capture/replay-only resources.
        #[cfg(feature = "capture")]
        self.device.delete_fbo(self.read_fbo);
        #[cfg(feature = "replay")]
        for (_, ext) in self.owned_external_images {
            self.device.delete_external_texture(ext);
        }
        self.device.end_frame();
    }
   6137 
   6138    /// Collects a memory report.
   6139    pub fn report_memory(&self, swgl: *mut c_void) -> MemoryReport {
   6140        let mut report = MemoryReport::default();
   6141 
   6142        self.staging_texture_pool.report_memory_to(&mut report, self.size_of_ops.as_ref().unwrap());
   6143 
   6144        // Render task CPU memory.
   6145        for (_id, doc) in &self.active_documents {
   6146            let frame_alloc_stats = doc.frame.allocator_memory.get_stats();
   6147            report.frame_allocator += frame_alloc_stats.reserved_bytes;
   6148            report.render_tasks += doc.frame.render_tasks.report_memory();
   6149        }
   6150 
   6151        // Vertex data GPU memory.
   6152        for textures in &self.vertex_data_textures {
   6153            report.vertex_data_textures += textures.size_in_bytes();
   6154        }
   6155 
   6156        // Texture cache and render target GPU memory.
   6157        report += self.texture_resolver.report_memory();
   6158 
   6159        // Texture upload PBO memory.
   6160        report += self.texture_upload_pbo_pool.report_memory();
   6161 
   6162        // Textures held internally within the device layer.
   6163        report += self.device.report_memory(self.size_of_ops.as_ref().unwrap(), swgl);
   6164 
   6165        report
   6166    }
   6167 
   6168    // Sets the blend mode. Blend is unconditionally set if the "show overdraw" debugging mode is
   6169    // enabled.
   6170    fn set_blend(&mut self, mut blend: bool, framebuffer_kind: FramebufferKind) {
   6171        if framebuffer_kind == FramebufferKind::Main &&
   6172                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
   6173            blend = true
   6174        }
   6175        self.device.set_blend(blend)
   6176    }
   6177 
   6178    fn set_blend_mode_multiply(&mut self, framebuffer_kind: FramebufferKind) {
   6179        if framebuffer_kind == FramebufferKind::Main &&
   6180                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
   6181            self.device.set_blend_mode_show_overdraw();
   6182        } else {
   6183            self.device.set_blend_mode_multiply();
   6184        }
   6185    }
   6186 
   6187    fn set_blend_mode_premultiplied_alpha(&mut self, framebuffer_kind: FramebufferKind) {
   6188        if framebuffer_kind == FramebufferKind::Main &&
   6189                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
   6190            self.device.set_blend_mode_show_overdraw();
   6191        } else {
   6192            self.device.set_blend_mode_premultiplied_alpha();
   6193        }
   6194    }
   6195 
   6196    /// Clears the texture with a given color.
   6197    fn clear_texture(&mut self, texture: &Texture, color: [f32; 4]) {
   6198        self.device.bind_draw_target(DrawTarget::from_texture(
   6199            &texture,
   6200            false,
   6201        ));
   6202        self.device.clear_target(Some(color), None, None);
   6203    }
   6204 }
   6205 
bitflags! {
    /// Flags that control how shaders are pre-cached, if at all.
    #[derive(Default, Debug, Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
    pub struct ShaderPrecacheFlags: u32 {
        /// Needed for const initialization
        const EMPTY                 = 0;

        // NOTE(review): bits 0 and 1 are skipped — presumably historical flag
        // values that were removed; confirm before renumbering.

        /// Only start async compile
        const ASYNC_COMPILE         = 1 << 2;

        /// Do a full compile/link during startup
        const FULL_COMPILE          = 1 << 3;
    }
}
   6220 
/// The cumulative times spent in each painting phase to generate this frame.
#[derive(Debug, Default)]
pub struct FullFrameStats {
    // Whether a full display list was involved in this frame (ORed across
    // frames by `merge`).
    pub full_display_list: bool,
    // Time spent building the Gecko display list.
    pub gecko_display_list_time: f64,
    // Time spent building the WebRender display list.
    pub wr_display_list_time: f64,
    // Time spent in scene building.
    pub scene_build_time: f64,
    // Time spent in frame building.
    pub frame_build_time: f64,
}
   6230 
   6231 impl FullFrameStats {
   6232    pub fn merge(&self, other: &FullFrameStats) -> Self {
   6233        Self {
   6234            full_display_list: self.full_display_list || other.full_display_list,
   6235            gecko_display_list_time: self.gecko_display_list_time + other.gecko_display_list_time,
   6236            wr_display_list_time: self.wr_display_list_time + other.wr_display_list_time,
   6237            scene_build_time: self.scene_build_time + other.scene_build_time,
   6238            frame_build_time: self.frame_build_time + other.frame_build_time
   6239        }
   6240    }
   6241 
   6242    pub fn total(&self) -> f64 {
   6243      self.gecko_display_list_time + self.wr_display_list_time + self.scene_build_time + self.frame_build_time
   6244    }
   6245 }
   6246 
/// Some basic statistics about the rendered scene, used in Gecko, as
/// well as in wrench reftests to ensure that tests are batching and/or
/// allocating on render targets as we expect them to.
///
/// This struct is `repr(C)` so it can cross the FFI boundary to Gecko.
#[repr(C)]
#[derive(Debug, Default)]
pub struct RendererStats {
    // Total number of GPU draw calls issued for the frame.
    pub total_draw_calls: usize,
    // Number of alpha render targets used.
    pub alpha_target_count: usize,
    // Number of color render targets used.
    pub color_target_count: usize,
    // Megabytes of texture data uploaded this frame.
    pub texture_upload_mb: f64,
    // Time spent uploading resources.
    pub resource_upload_time: f64,
    // Timings copied in from `FullFrameStats` by `merge`.
    pub gecko_display_list_time: f64,
    pub wr_display_list_time: f64,
    pub scene_build_time: f64,
    pub frame_build_time: f64,
    // Whether a full display list was involved.
    pub full_display_list: bool,
    // Set to true by `merge` when frame stats are recorded.
    pub full_paint: bool,
}
   6265 
   6266 impl RendererStats {
   6267    pub fn merge(&mut self, stats: &FullFrameStats) {
   6268        self.gecko_display_list_time = stats.gecko_display_list_time;
   6269        self.wr_display_list_time = stats.wr_display_list_time;
   6270        self.scene_build_time = stats.scene_build_time;
   6271        self.frame_build_time = stats.frame_build_time;
   6272        self.full_display_list = stats.full_display_list;
   6273        self.full_paint = true;
   6274    }
   6275 }
   6276 
/// Return type from render(), which contains some repr(C) statistics as well as
/// some non-repr(C) data.
#[derive(Debug, Default)]
pub struct RenderResults {
    /// Statistics about the frame that was rendered. This is the `repr(C)`
    /// portion that can be shared with the embedder.
    pub stats: RendererStats,

    /// A list of the device dirty rects that were updated
    /// this frame.
    /// TODO(gw): This is an initial interface, likely to change in future.
    /// TODO(gw): The dirty rects here are currently only useful when scrolling
    ///           is not occurring. They are still correct in the case of
    ///           scrolling, but will be very large (until we expose proper
    ///           OS compositor support where the dirty rects apply to a
    ///           specific picture cache slice / OS compositor surface).
    pub dirty_rects: Vec<DeviceIntRect>,

    /// Information about the state of picture cache tiles. This is only
    /// allocated and stored if config.testing is true (such as wrench)
    pub picture_cache_debug: PictureCacheDebugInfo,
}
   6298 
/// Serialized description of a captured texture: the relative path of its raw
/// texel file plus the parameters needed to recreate it on replay.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainTexture {
    // Path of the raw texel file, relative to the capture's resource root.
    data: String,
    size: DeviceIntSize,
    format: ImageFormat,
    filter: TextureFilter,
    has_depth: bool,
    category: Option<TextureCacheCategory>,
}
   6310 
   6311 
/// Serialized renderer state for captures: the device size and the set of
/// cached textures, keyed by their cache texture id.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainRenderer {
    device_size: Option<DeviceIntSize>,
    textures: FastHashMap<CacheTextureId, PlainTexture>,
}
   6319 
/// Serialized list of the external images referenced by a capture.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainExternalResources {
    images: Vec<ExternalCaptureImage>
}
   6326 
/// Payload for an external image recreated during replay: either a GL texture
/// handle or an in-memory pixel buffer.
#[cfg(feature = "replay")]
enum CapturedExternalImageData {
    NativeTexture(gl::GLuint),
    Buffer(Arc<Vec<u8>>),
}
   6332 
/// External image handler used during replay: serves images from previously
/// captured data instead of from a live embedder.
#[cfg(feature = "replay")]
struct DummyExternalImageHandler {
    // Captured payload and UV rect per (image id, channel) pair.
    data: FastHashMap<(ExternalImageId, u8), (CapturedExternalImageData, TexelRect)>,
}
   6337 
   6338 #[cfg(feature = "replay")]
   6339 impl ExternalImageHandler for DummyExternalImageHandler {
   6340    fn lock(&mut self, key: ExternalImageId, channel_index: u8, _is_composited: bool) -> ExternalImage {
   6341        let (ref captured_data, ref uv) = self.data[&(key, channel_index)];
   6342        ExternalImage {
   6343            uv: *uv,
   6344            source: match *captured_data {
   6345                CapturedExternalImageData::NativeTexture(tid) => ExternalImageSource::NativeTexture(tid),
   6346                CapturedExternalImageData::Buffer(ref arc) => ExternalImageSource::RawData(&*arc),
   6347            }
   6348        }
   6349    }
   6350    fn unlock(&mut self, _key: ExternalImageId, _channel_index: u8) {}
   6351 }
   6352 
/// Per-update summary of pipeline state, reported back to the embedder.
#[derive(Default)]
pub struct PipelineInfo {
    // Latest epoch per (pipeline, document) pair.
    pub epochs: FastHashMap<(PipelineId, DocumentId), Epoch>,
    // Pipelines that have been removed, with their owning document.
    pub removed_pipelines: Vec<(PipelineId, DocumentId)>,
}
   6358 
impl Renderer {
    /// Reads `texture` back from the GPU and writes its texels to
    /// `textures/{name}.raw` under `root` (plus a PNG preview when the "png"
    /// feature is enabled), returning a `PlainTexture` descriptor that records
    /// the file path and the texture's parameters.
    #[cfg(feature = "capture")]
    fn save_texture(
        texture: &Texture, category: Option<TextureCacheCategory>, name: &str, root: &PathBuf, device: &mut Device
    ) -> PlainTexture {
        use std::fs;
        use std::io::Write;

        let short_path = format!("textures/{}.raw", name);

        let bytes_per_pixel = texture.get_format().bytes_per_pixel();
        let read_format = texture.get_format();
        let rect_size = texture.get_dimensions();

        let mut file = fs::File::create(root.join(&short_path))
            .expect(&format!("Unable to create {}", short_path));
        let bytes_per_texture = (rect_size.width * rect_size.height * bytes_per_pixel) as usize;
        let mut data = vec![0; bytes_per_texture];

        //TODO: instead of reading from an FBO with `read_pixels*`, we could
        // read from textures directly with `get_tex_image*`.

        let rect = device_size_as_framebuffer_size(rect_size).into();

        device.attach_read_texture(texture);
        #[cfg(feature = "png")]
        {
            // RGBAF32 can't be written to PNG directly, so do an extra RGBA8
            // readback for the preview image in that case.
            // NOTE(review): for other formats the PNG is written from `data`,
            // which is not filled until the `read_pixels_into` call below —
            // the preview is presumably all zeros; confirm intent.
            let mut png_data;
            let (data_ref, format) = match texture.get_format() {
                ImageFormat::RGBAF32 => {
                    png_data = vec![0; (rect_size.width * rect_size.height * 4) as usize];
                    device.read_pixels_into(rect, ImageFormat::RGBA8, &mut png_data);
                    (&png_data, ImageFormat::RGBA8)
                }
                fm => (&data, fm),
            };
            CaptureConfig::save_png(
                root.join(format!("textures/{}-{}.png", name, 0)),
                rect_size, format,
                None,
                data_ref,
            );
        }
        device.read_pixels_into(rect, read_format, &mut data);
        file.write_all(&data)
            .unwrap();

        PlainTexture {
            data: short_path,
            size: rect_size,
            format: texture.get_format(),
            filter: texture.get_filter(),
            has_depth: texture.supports_depth(),
            category,
        }
    }

    /// Recreates a texture from a `PlainTexture` descriptor written by
    /// `save_texture`: reads the raw texel file under `root`, allocates a
    /// device texture with the recorded parameters and uploads the texels.
    /// Returns the texture together with the raw texel bytes.
    #[cfg(feature = "replay")]
    fn load_texture(
        target: ImageBufferKind,
        plain: &PlainTexture,
        rt_info: Option<RenderTargetInfo>,
        root: &PathBuf,
        device: &mut Device
    ) -> (Texture, Vec<u8>)
    {
        use std::fs::File;
        use std::io::Read;

        let mut texels = Vec::new();
        File::open(root.join(&plain.data))
            .expect(&format!("Unable to open texture at {}", plain.data))
            .read_to_end(&mut texels)
            .unwrap();

        let texture = device.create_texture(
            target,
            plain.format,
            plain.size.width,
            plain.size.height,
            plain.filter,
            rt_info,
        );
        device.upload_texture_immediate(&texture, &texels);

        (texture, texels)
    }

    /// Writes the renderer-side portion of a capture to disk: external image
    /// payloads (when `EXTERNAL_RESOURCES` is requested), the texture cache
    /// contents (when `FRAME` is requested), and a profiler stats dump.
    #[cfg(feature = "capture")]
    fn save_capture(
        &mut self,
        config: CaptureConfig,
        deferred_images: Vec<ExternalCaptureImage>,
    ) {
        use std::fs;
        use std::io::Write;
        use api::ExternalImageData;
        use crate::render_api::CaptureBits;

        let root = config.resource_root();

        self.device.begin_frame();
        let _gm = self.gpu_profiler.start_marker("read GPU data");
        self.device.bind_read_target_impl(self.read_fbo, DeviceIntPoint::zero());

        if config.bits.contains(CaptureBits::EXTERNAL_RESOURCES) && !deferred_images.is_empty() {
            info!("saving external images");
            // Dedup maps: several external images may share a backing buffer
            // pointer or GL texture id; each payload is written to disk once.
            let mut arc_map = FastHashMap::<*const u8, String>::default();
            let mut tex_map = FastHashMap::<u32, String>::default();
            let handler = self.external_image_handler
                .as_mut()
                .expect("Unable to lock the external image handler!");
            for def in &deferred_images {
                info!("\t{}", def.short_path);
                let ExternalImageData { id, channel_index, image_type, .. } = def.external;
                // The image rendering parameter is irrelevant because no filtering happens during capturing.
                let ext_image = handler.lock(id, channel_index, false);
                // Resolve the image payload to (optional bytes to write, path).
                let (data, short_path) = match ext_image.source {
                    ExternalImageSource::RawData(data) => {
                        let arc_id = arc_map.len() + 1;
                        match arc_map.entry(data.as_ptr()) {
                            Entry::Occupied(e) => {
                                (None, e.get().clone())
                            }
                            Entry::Vacant(e) => {
                                let short_path = format!("externals/d{}.raw", arc_id);
                                (Some(data.to_vec()), e.insert(short_path).clone())
                            }
                        }
                    }
                    ExternalImageSource::NativeTexture(gl_id) => {
                        let tex_id = tex_map.len() + 1;
                        match tex_map.entry(gl_id) {
                            Entry::Occupied(e) => {
                                (None, e.get().clone())
                            }
                            Entry::Vacant(e) => {
                                let target = match image_type {
                                    ExternalImageType::TextureHandle(target) => target,
                                    ExternalImageType::Buffer => unreachable!(),
                                };
                                info!("\t\tnative texture of target {:?}", target);
                                // Read the texture contents back from the GPU.
                                self.device.attach_read_texture_external(gl_id, target);
                                let data = self.device.read_pixels(&def.descriptor);
                                let short_path = format!("externals/t{}.raw", tex_id);
                                (Some(data), e.insert(short_path).clone())
                            }
                        }
                    }
                    ExternalImageSource::Invalid => {
                        info!("\t\tinvalid source!");
                        (None, String::new())
                    }
                };
                if let Some(bytes) = data {
                    fs::File::create(root.join(&short_path))
                        .expect(&format!("Unable to create {}", short_path))
                        .write_all(&bytes)
                        .unwrap();
                    #[cfg(feature = "png")]
                    CaptureConfig::save_png(
                        root.join(&short_path).with_extension("png"),
                        def.descriptor.size,
                        def.descriptor.format,
                        def.descriptor.stride,
                        &bytes,
                    );
                }
                // Serialize the per-image descriptor referencing the payload.
                let plain = PlainExternalImage {
                    data: short_path,
                    external: def.external,
                    uv: ext_image.uv,
                };
                config.serialize_for_resource(&plain, &def.short_path);
            }
            // Unlock everything only after all payloads have been saved.
            for def in &deferred_images {
                handler.unlock(def.external.id, def.external.channel_index);
            }
            let plain_external = PlainExternalResources {
                images: deferred_images,
            };
            config.serialize_for_resource(&plain_external, "external_resources");
        }

        if config.bits.contains(CaptureBits::FRAME) {
            let path_textures = root.join("textures");
            if !path_textures.is_dir() {
                fs::create_dir(&path_textures).unwrap();
            }

            let mut plain_self = PlainRenderer {
                device_size: self.device_size,
                textures: FastHashMap::default(),
            };

            info!("saving cached textures");
            for (id, item) in &self.texture_resolver.texture_cache_map {
                let file_name = format!("cache-{}", plain_self.textures.len() + 1);
                info!("\t{}", file_name);
                let plain = Self::save_texture(&item.texture, Some(item.category), &file_name, &root, &mut self.device);
                plain_self.textures.insert(*id, plain);
            }

            config.serialize_for_resource(&plain_self, "renderer");
        }

        self.device.reset_read_target();
        self.device.end_frame();

        // Dump profiler stats alongside the capture, or a hint if profiling
        // was not enabled.
        let mut stats_file = fs::File::create(config.root.join("profiler-stats.txt"))
            .expect(&format!("Unable to create profiler-stats.txt"));
        if self.debug_flags.intersects(DebugFlags::PROFILER_DBG | DebugFlags::PROFILER_CAPTURE) {
            self.profiler.dump_stats(&mut stats_file).unwrap();
        } else {
            writeln!(stats_file, "Turn on PROFILER_DBG or PROFILER_CAPTURE to get stats here!").unwrap();
        }

        info!("done.");
    }

    /// Restores renderer state from a capture: rebuilds the external image
    /// handler from captured buffers/textures and (if present) reloads the
    /// cached textures, replacing the current texture cache contents.
    #[cfg(feature = "replay")]
    fn load_capture(
        &mut self,
        config: CaptureConfig,
        plain_externals: Vec<PlainExternalImage>,
    ) {
        use std::{fs::File, io::Read};

        info!("loading external buffer-backed images");
        assert!(self.texture_resolver.external_images.is_empty());
        // Dedup map so a payload file shared by several images is read once.
        let mut raw_map = FastHashMap::<String, Arc<Vec<u8>>>::default();
        let mut image_handler = DummyExternalImageHandler {
            data: FastHashMap::default(),
        };

        let root = config.resource_root();

        // Note: this is a `SCENE` level population of the external image handlers
        // It would put both external buffers and texture into the map.
        // But latter are going to be overwritten later in this function
        // if we are in the `FRAME` level.
        for plain_ext in plain_externals {
            let data = match raw_map.entry(plain_ext.data) {
                Entry::Occupied(e) => e.get().clone(),
                Entry::Vacant(e) => {
                    let mut buffer = Vec::new();
                    File::open(root.join(e.key()))
                        .expect(&format!("Unable to open {}", e.key()))
                        .read_to_end(&mut buffer)
                        .unwrap();
                    e.insert(Arc::new(buffer)).clone()
                }
            };
            let ext = plain_ext.external;
            let value = (CapturedExternalImageData::Buffer(data), plain_ext.uv);
            image_handler.data.insert((ext.id, ext.channel_index), value);
        }

        if let Some(external_resources) = config.deserialize_for_resource::<PlainExternalResources, _>("external_resources") {
            info!("loading external texture-backed images");
            // Dedup map: one GL texture per distinct payload file.
            let mut native_map = FastHashMap::<String, gl::GLuint>::default();
            for ExternalCaptureImage { short_path, external, descriptor } in external_resources.images {
                let target = match external.image_type {
                    ExternalImageType::TextureHandle(target) => target,
                    ExternalImageType::Buffer => continue,
                };
                let plain_ext = config.deserialize_for_resource::<PlainExternalImage, _>(&short_path)
                    .expect(&format!("Unable to read {}.ron", short_path));
                let key = (external.id, external.channel_index);

                let tid = match native_map.entry(plain_ext.data) {
                    Entry::Occupied(e) => e.get().clone(),
                    Entry::Vacant(e) => {
                        let plain_tex = PlainTexture {
                            data: e.key().clone(),
                            size: descriptor.size,
                            format: descriptor.format,
                            filter: TextureFilter::Linear,
                            has_depth: false,
                            category: None,
                        };
                        let t = Self::load_texture(
                            target,
                            &plain_tex,
                            None,
                            &root,
                            &mut self.device
                        );
                        // Keep ownership of the external texture so it can be
                        // deleted in `deinit`.
                        let extex = t.0.into_external();
                        self.owned_external_images.insert(key, extex.clone());
                        e.insert(extex.internal_id()).clone()
                    }
                };

                let value = (CapturedExternalImageData::NativeTexture(tid), plain_ext.uv);
                image_handler.data.insert(key, value);
            }
        }

        self.device.begin_frame();

        if let Some(renderer) = config.deserialize_for_resource::<PlainRenderer, _>("renderer") {
            info!("loading cached textures");
            self.device_size = renderer.device_size;

            // Replace the existing texture cache with the captured one.
            for (_id, item) in self.texture_resolver.texture_cache_map.drain() {
                self.device.delete_texture(item.texture);
            }
            for (id, texture) in renderer.textures {
                info!("\t{}", texture.data);
                let target = ImageBufferKind::Texture2D;
                let t = Self::load_texture(
                    target,
                    &texture,
                    Some(RenderTargetInfo { has_depth: texture.has_depth }),
                    &root,
                    &mut self.device
                );
                self.texture_resolver.texture_cache_map.insert(id, CacheTexture {
                    texture: t.0,
                    category: texture.category.unwrap_or(TextureCacheCategory::Standalone),
                });
            }
        } else {
            info!("loading cached textures");
            // NOTE(review): `begin_frame` was already called just above this
            // `if`, so this branch calls it a second time without an
            // intervening `end_frame` — looks unintentional; confirm.
            self.device.begin_frame();
            for (_id, item) in self.texture_resolver.texture_cache_map.drain() {
                self.device.delete_texture(item.texture);
            }
        }
        self.device.end_frame();

        self.external_image_handler = Some(Box::new(image_handler) as Box<_>);
        info!("done.");
    }
}
   6695 
/// Distinguishes the main framebuffer from other draw targets, for decisions
/// (such as overdraw debugging) that only apply to the main one.
#[derive(Clone, Copy, PartialEq)]
enum FramebufferKind {
    /// The main (window) framebuffer.
    Main,
    /// Any other render target.
    Other,
}
   6701 
   6702 fn should_skip_batch(kind: &BatchKind, flags: DebugFlags) -> bool {
   6703    match kind {
   6704        BatchKind::TextRun(_) => {
   6705            flags.contains(DebugFlags::DISABLE_TEXT_PRIMS)
   6706        }
   6707        BatchKind::Brush(BrushBatchKind::LinearGradient) => {
   6708            flags.contains(DebugFlags::DISABLE_GRADIENT_PRIMS)
   6709        }
   6710        _ => false,
   6711    }
   6712 }
   6713 
   6714 impl CompositeState {
   6715    /// Use the client provided native compositor interface to add all picture
   6716    /// cache tiles to the OS compositor
   6717    fn composite_native(
   6718        &self,
   6719        clear_color: ColorF,
   6720        dirty_rects: &[DeviceIntRect],
   6721        device: &mut Device,
   6722        compositor: &mut dyn Compositor,
   6723    ) {
   6724        // Add each surface to the visual tree. z-order is implicit based on
   6725        // order added. Offset and clip rect apply to all tiles within this
   6726        // surface.
   6727        for surface in &self.descriptor.surfaces {
   6728            compositor.add_surface(
   6729                device,
   6730                surface.surface_id.expect("bug: no native surface allocated"),
   6731                surface.transform,
   6732                surface.clip_rect.to_i32(),
   6733                surface.image_rendering,
   6734                surface.rounded_clip_rect.to_i32(),
   6735                surface.rounded_clip_radii,
   6736            );
   6737        }
   6738        compositor.start_compositing(device, clear_color, dirty_rects, &[]);
   6739    }
   6740 }
   6741 
   6742 mod tests {
   6743    #[test]
   6744    fn test_buffer_damage_tracker() {
   6745        use super::BufferDamageTracker;
   6746        use api::units::{DevicePoint, DeviceRect, DeviceSize};
   6747 
   6748        let mut tracker = BufferDamageTracker::default();
   6749        assert_eq!(tracker.get_damage_rect(0), None);
   6750        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
   6751        assert_eq!(tracker.get_damage_rect(2), Some(DeviceRect::zero()));
   6752        assert_eq!(tracker.get_damage_rect(3), Some(DeviceRect::zero()));
   6753 
   6754        let damage1 = DeviceRect::from_origin_and_size(DevicePoint::new(10.0, 10.0), DeviceSize::new(10.0, 10.0));
   6755        let damage2 = DeviceRect::from_origin_and_size(DevicePoint::new(20.0, 20.0), DeviceSize::new(10.0, 10.0));
   6756        let combined = damage1.union(&damage2);
   6757 
   6758        tracker.push_dirty_rect(&damage1);
   6759        assert_eq!(tracker.get_damage_rect(0), None);
   6760        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
   6761        assert_eq!(tracker.get_damage_rect(2), Some(damage1));
   6762        assert_eq!(tracker.get_damage_rect(3), Some(damage1));
   6763 
   6764        tracker.push_dirty_rect(&damage2);
   6765        assert_eq!(tracker.get_damage_rect(0), None);
   6766        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
   6767        assert_eq!(tracker.get_damage_rect(2), Some(damage2));
   6768        assert_eq!(tracker.get_damage_rect(3), Some(combined));
   6769    }
   6770 }