tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

sw_compositor.rs (85056B)


      1 /* This Source Code Form is subject to the terms of the Mozilla Public
      2 * License, v. 2.0. If a copy of the MPL was not distributed with this
      3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      4 
      5 use gleam::{gl, gl::Gl};
      6 use std::cell::{Cell, UnsafeCell};
      7 use std::collections::{hash_map::HashMap, VecDeque};
      8 use std::ops::{Deref, DerefMut, Range};
      9 use std::ptr;
     10 use std::sync::atomic::{AtomicBool, AtomicI8, AtomicPtr, AtomicU32, AtomicU8, Ordering};
     11 use std::sync::{Arc, Condvar, Mutex, MutexGuard};
     12 use std::thread;
     13 use crate::{
     14    api::units::*, api::ColorDepth, api::ColorF, api::ExternalImageId, api::ImageRendering, api::YuvRangedColorSpace,
     15    Compositor, CompositorCapabilities, CompositorSurfaceTransform, NativeSurfaceId, NativeSurfaceInfo, NativeTileId,
     16    profiler, MappableCompositor, SWGLCompositeSurfaceInfo, WindowVisibility,
     17    device::Device, ClipRadius
     18 };
     19 
// Size (in pixels) of the indirection buffer used for applying rounded rect alpha masks as required.
// Both the RGBA8 indirection texture and the R8 mask texture in SwCompositeJobContext use these
// dimensions, and SwCompositeJob::process splits masked regions into tiles no larger than this.
const INDIRECT_BUFFER_WIDTH: i32 = 64;
const INDIRECT_BUFFER_HEIGHT: i32 = 64;
     23 
// A rounded rect clip in device-space
#[derive(Debug, Copy, Clone)]
struct RoundedClip {
    // Device-space bounding rect of the clip.
    rect: DeviceIntRect,
    // Per-corner radii. All corners are zero (ClipRadius::EMPTY) when the clip is unused;
    // see is_valid().
    radii: ClipRadius,
}
     30 
     31 impl RoundedClip {
     32    // Construct an empty clip
     33    fn zero() -> Self {
     34        RoundedClip {
     35            rect: DeviceIntRect::zero(),
     36            radii: ClipRadius::EMPTY,
     37        }
     38    }
     39 
     40    // Returns true if this clip has any non-zero corners
     41    fn is_valid(&self) -> bool {
     42        self.radii != ClipRadius::EMPTY
     43    }
     44 
     45    // Returns true if a given rect in device space is affected by this clip
     46    fn affects_rect(&self, rect: &DeviceIntRect) -> bool {
     47        // If there are no non-zero rounded corners, no clip needed
     48        if !self.is_valid() {
     49            return false;
     50        }
     51 
     52        // Check if any corners where the mask exists are affected by the clip
     53        let rect_tl = DeviceIntRect::from_origin_and_size(
     54            self.rect.min,
     55            DeviceIntSize::new(self.radii.top_left, self.radii.top_left),
     56        );
     57        if rect_tl.intersects(rect) {
     58            return true;
     59        }
     60 
     61        let rect_tr = DeviceIntRect::from_origin_and_size(
     62            DeviceIntPoint::new(
     63                self.rect.max.x - self.radii.top_right,
     64                self.rect.min.y,
     65            ),
     66            DeviceIntSize::new(self.radii.top_right, self.radii.top_right),
     67        );
     68        if rect_tr.intersects(rect) {
     69            return true;
     70        }
     71 
     72        let rect_br = DeviceIntRect::from_origin_and_size(
     73            DeviceIntPoint::new(
     74                self.rect.max.x - self.radii.bottom_right,
     75                self.rect.max.y - self.radii.bottom_right,
     76            ),
     77            DeviceIntSize::new(self.radii.bottom_right, self.radii.bottom_right),
     78        );
     79        if rect_br.intersects(rect) {
     80            return true;
     81        }
     82 
     83        let rect_bl = DeviceIntRect::from_origin_and_size(
     84            DeviceIntPoint::new(
     85                self.rect.min.x,
     86                self.rect.max.y - self.radii.bottom_left,
     87            ),
     88            DeviceIntSize::new(self.radii.bottom_left, self.radii.bottom_left),
     89        );
     90        if rect_bl.intersects(rect) {
     91            return true;
     92        }
     93 
     94        // TODO(gw): If a clip is inside the bounds of the surface, this will fail to
     95        //           detect that case. It doesn't happen in existing scenarios.
     96 
     97        false
     98    }
     99 }
    100 
// Persistent context that is stored per-thread, and available for use by composite
// jobs as required. Both textures are INDIRECT_BUFFER_WIDTH x INDIRECT_BUFFER_HEIGHT
// and are locked for the lifetime of the context (see SwCompositeJobContext::new).
pub struct SwCompositeJobContext {
    // Fixed size R8 texture that can be used to write an alpha mask to
    mask: swgl::LockedResource,
    // Fixed size RGBA8 texture that can be used as a temporary indirection buffer
    indirect: swgl::LockedResource,
}
    109 
// Persistent context for mask and indirection buffers, one per thread that may
// run composite jobs. Keeping a dedicated context per thread avoids any need
// to synchronize access to the scratch buffers.
pub struct SwCompositeContext {
    // Context for the jobs run on the main thread
    main: SwCompositeJobContext,
    // Context for the jobs run on the composite thread
    thread: SwCompositeJobContext,
}
    117 
    118 impl SwCompositeContext {
    119    fn new(gl: &swgl::Context) -> Self {
    120        SwCompositeContext {
    121            main: SwCompositeJobContext::new(gl),
    122            thread: SwCompositeJobContext::new(gl),
    123        }
    124    }
    125 
    126    fn get_job_context(&self, is_composite_thread: bool) -> &SwCompositeJobContext {
    127        if is_composite_thread {
    128            &self.thread
    129        } else {
    130            &self.main
    131        }
    132    }
    133 }
    134 
impl SwCompositeJobContext {
    // Construct a new per-thread context for sw composite jobs.
    //
    // Allocates two fixed-size texture buffers — an RGBA8 indirection buffer
    // and an R8 mask buffer — and locks both so that jobs can access the
    // underlying pixel storage without any further GL calls.
    fn new(gl: &swgl::Context) -> Self {
        let texture_ids = gl.gen_textures(2);
        let indirect_id = texture_ids[0];
        let mask_id = texture_ids[1];

        // RGBA8 scratch target used as a temporary composite destination when
        // a rounded clip mask must be applied. A null data pointer is passed
        // here — presumably swgl allocates the backing store itself in that
        // case; TODO confirm against swgl's set_texture_buffer semantics.
        gl.set_texture_buffer(
            indirect_id,
            gl::RGBA8,
            INDIRECT_BUFFER_WIDTH,
            INDIRECT_BUFFER_HEIGHT,
            0,
            ptr::null_mut(),
            INDIRECT_BUFFER_WIDTH,
            INDIRECT_BUFFER_HEIGHT,
        );

        // Single-channel R8 buffer that SwCompositeJob::create_mask rasterizes
        // rounded-rect coverage into.
        gl.set_texture_buffer(
            mask_id,
            gl::R8,
            INDIRECT_BUFFER_WIDTH,
            INDIRECT_BUFFER_HEIGHT,
            0,
            ptr::null_mut(),
            INDIRECT_BUFFER_WIDTH,
            INDIRECT_BUFFER_HEIGHT,
        );

        // Lock both textures for the lifetime of the context. Failure to lock
        // indicates a bug (the textures were just created above).
        let indirect = gl.lock_texture(indirect_id).expect("bug: unable to lock indirect");
        let mask = gl.lock_texture(mask_id).expect("bug: unable to lock mask");

        SwCompositeJobContext {
            indirect,
            mask,
        }
    }
}
    173 
pub struct SwTile {
    /// Tile coordinates within the surface's tile grid (multiplied by the
    /// surface tile size to get the tile origin; see `origin`).
    x: i32,
    y: i32,
    /// GL framebuffer object id for this tile — presumably the render target
    /// used when drawing tile contents; not exercised in this chunk, confirm
    /// against the rest of the file.
    fbo_id: u32,
    /// GL texture id holding the tile's color contents — same caveat as
    /// `fbo_id`.
    color_id: u32,
    /// The rect of valid pixels, relative to the tile's own origin.
    valid_rect: DeviceIntRect,
    /// Composition of tiles must be ordered such that any tiles that may overlap
    /// an invalidated tile in an earlier surface only get drawn after that tile
    /// is actually updated. We store a count of the number of overlapping invalid
    /// here, that gets decremented when the invalid tiles are finally updated so
    /// that we know when it is finally safe to draw. Must use a Cell as we might
    /// be analyzing multiple tiles and surfaces
    overlaps: Cell<u32>,
    /// Whether the tile's contents has been invalidated
    invalid: Cell<bool>,
    /// Graph node for job dependencies of this tile
    graph_node: SwCompositeGraphNodeRef,
}
    192 
impl SwTile {
    /// Construct a tile at the given grid coordinates with no GL resources
    /// attached yet, an empty valid rect, and no overlap dependencies.
    fn new(x: i32, y: i32) -> Self {
        SwTile {
            x,
            y,
            fbo_id: 0,
            color_id: 0,
            valid_rect: DeviceIntRect::zero(),
            overlaps: Cell::new(0),
            invalid: Cell::new(false),
            graph_node: SwCompositeGraphNode::new(),
        }
    }

    /// The offset of the tile in the local space of the surface before any
    /// transform is applied.
    fn origin(&self, surface: &SwSurface) -> DeviceIntPoint {
        DeviceIntPoint::new(self.x * surface.tile_size.width, self.y * surface.tile_size.height)
    }

    /// The offset valid rect positioned within the local space of the surface
    /// before any transform is applied.
    fn local_bounds(&self, surface: &SwSurface) -> DeviceIntRect {
        self.valid_rect.translate(self.origin(surface).to_vector())
    }

    /// Bounds used for determining overlap dependencies. This may either be the
    /// full tile bounds or the actual valid rect, depending on whether the tile
    /// is invalidated this frame. These bounds are more conservative as such and
    /// may differ from the precise bounds used to actually composite the tile.
    ///
    /// NOTE(review): as written, this always uses the valid rect (via
    /// `local_bounds`); the "full tile bounds" case described above is not
    /// taken anywhere here — confirm whether the comment is stale.
    fn overlap_rect(
        &self,
        surface: &SwSurface,
        transform: &CompositorSurfaceTransform,
        clip_rect: &DeviceIntRect,
    ) -> Option<DeviceIntRect> {
        let bounds = self.local_bounds(surface);
        // Transform to device space, expand to whole pixels, then clip.
        // Returns None if the tile is entirely clipped out.
        let device_rect = transform.map_rect(&bounds.to_f32()).round_out();
        Some(device_rect.intersection(&clip_rect.to_f32())?.to_i32())
    }

    /// Determine if the tile's bounds may overlap the dependency rect if it were
    /// to be composited at the given position.
    fn may_overlap(
        &self,
        surface: &SwSurface,
        transform: &CompositorSurfaceTransform,
        clip_rect: &DeviceIntRect,
        dep_rect: &DeviceIntRect,
    ) -> bool {
        self.overlap_rect(surface, transform, clip_rect)
            .map_or(false, |r| r.intersects(dep_rect))
    }

    /// Get valid source and destination rectangles for composition of the tile
    /// within a surface, bounded by the clipping rectangle. May return None if
    /// it falls outside of the clip rect.
    ///
    /// Returns `(src_rect, dest_rect, flip_x, flip_y)` where the flips record
    /// a negative scale on the respective axis of the transform.
    fn composite_rects(
        &self,
        surface: &SwSurface,
        transform: &CompositorSurfaceTransform,
        clip_rect: &DeviceIntRect,
    ) -> Option<(DeviceIntRect, DeviceIntRect, bool, bool)> {
        // Offset the valid rect to the appropriate surface origin.
        let valid = self.local_bounds(surface);
        // The destination rect is the valid rect transformed and then clipped.
        let dest_rect = transform.map_rect(&valid.to_f32()).round_out();
        if !dest_rect.intersects(&clip_rect.to_f32()) {
            return None;
        }
        // To get a valid source rect, we need to inverse transform the clipped destination rect to find out the effect
        // of the clip rect in source-space. After this, we subtract off the source-space valid rect origin to get
        // a source rect that is now relative to the surface origin rather than absolute.
        let inv_transform = transform.inverse();
        let src_rect = inv_transform
            .map_rect(&dest_rect)
            .round()
            .translate(-valid.min.to_vector().to_f32());
        // Ensure source and dest rects when transformed from Box2D to Rect formats will still fit in an i32.
        // If p0=i32::MIN and p1=i32::MAX, then evaluating the size with p1-p0 will overflow an i32 and not
        // be representable. 
        if src_rect.size().try_cast::<i32>().is_none() ||
           dest_rect.size().try_cast::<i32>().is_none() {
            return None;
        }
        // A negative scale mirrors the tile along that axis during compositing.
        let flip_x = transform.scale.x < 0.0;
        let flip_y = transform.scale.y < 0.0;
        Some((src_rect.try_cast()?, dest_rect.try_cast()?, flip_x, flip_y))
    }
}
    283 
pub struct SwSurface {
    /// Size of each tile in this surface; tile grid coordinates are multiplied
    /// by this to produce tile origins (see SwTile::origin).
    tile_size: DeviceIntSize,
    /// Whether the surface contents are fully opaque.
    is_opaque: bool,
    /// The tiles making up this surface, addressed by their (x, y) grid ids.
    tiles: Vec<SwTile>,
    /// An attached external image for this surface.
    external_image: Option<ExternalImageId>,
    // The rounded clip that applies to this surface. All corners are zero if not used.
    rounded_clip: RoundedClip,
}
    293 
    294 impl SwSurface {
    295    fn new(tile_size: DeviceIntSize, is_opaque: bool) -> Self {
    296        SwSurface {
    297            tile_size,
    298            is_opaque,
    299            tiles: Vec::new(),
    300            external_image: None,
    301            rounded_clip: RoundedClip::zero(),
    302        }
    303    }
    304 
    305    /// Conserative approximation of local bounds of the surface by combining
    306    /// the local bounds of all enclosed tiles.
    307    fn local_bounds(&self) -> DeviceIntRect {
    308        let mut bounds = DeviceIntRect::zero();
    309        for tile in &self.tiles {
    310            bounds = bounds.union(&tile.local_bounds(self));
    311        }
    312        bounds
    313    }
    314 
    315    /// The transformed and clipped conservative device-space bounds of the
    316    /// surface.
    317    fn device_bounds(
    318        &self,
    319        transform: &CompositorSurfaceTransform,
    320        clip_rect: &DeviceIntRect,
    321    ) -> Option<DeviceIntRect> {
    322        let bounds = self.local_bounds();
    323        let device_rect = transform.map_rect(&bounds.to_f32()).round_out();
    324        Some(device_rect.intersection(&clip_rect.to_f32())?.to_i32())
    325    }
    326 
    327    /// Check that there are no missing tiles in the interior, or rather, that
    328    /// the grid of tiles is solidly rectangular.
    329    fn has_all_tiles(&self) -> bool {
    330        if self.tiles.is_empty() {
    331            return false;
    332        }
    333        // Find the min and max tile ids to identify the tile id bounds.
    334        let mut min_x = i32::MAX;
    335        let mut min_y = i32::MAX;
    336        let mut max_x = i32::MIN;
    337        let mut max_y = i32::MIN;
    338        for tile in &self.tiles {
    339            min_x = min_x.min(tile.x);
    340            min_y = min_y.min(tile.y);
    341            max_x = max_x.max(tile.x);
    342            max_y = max_y.max(tile.y);
    343        }
    344        // If all tiles are present within the bounds, then the number of tiles
    345        // should equal the area of the bounds.
    346        (max_x + 1 - min_x) as usize * (max_y + 1 - min_y) as usize == self.tiles.len()
    347    }
    348 }
    349 
    350 fn image_rendering_to_gl_filter(filter: ImageRendering) -> gl::GLenum {
    351    match filter {
    352        ImageRendering::Pixelated => gl::NEAREST,
    353        ImageRendering::Auto | ImageRendering::CrispEdges => gl::LINEAR,
    354    }
    355 }
    356 
/// A source for a composite job which can either be a single BGRA locked SWGL
/// resource or a collection of SWGL resources representing a YUV surface.
#[derive(Clone)]
enum SwCompositeSource {
    BGRA(swgl::LockedResource),
    /// The Y, U and V planes (in that order), followed by the ranged color
    /// space and bit depth used for the YUV-to-RGB conversion while
    /// compositing (see SwCompositeJob::composite_rect).
    YUV(
        swgl::LockedResource,
        swgl::LockedResource,
        swgl::LockedResource,
        YuvRangedColorSpace,
        ColorDepth,
    ),
}
    370 
/// Mark ExternalImage's renderer field as safe to send to SwComposite thread.
// SAFETY: relies on the locked SWGL resources remaining valid while a job is
// in flight on the composite thread — presumably guaranteed by the lock held
// in each variant; confirm against swgl::LockedResource's contract.
unsafe impl Send for SwCompositeSource {}
    373 
/// A tile composition job to be processed by the SwComposite thread.
/// Stores relevant details about the tile and where to composite it.
#[derive(Clone)]
struct SwCompositeJob {
    /// Locked texture that will be unlocked immediately following the job
    locked_src: SwCompositeSource,
    /// Locked framebuffer that may be shared among many jobs
    locked_dst: swgl::LockedResource,
    /// Source-space rect within `locked_src` to sample from.
    src_rect: DeviceIntRect,
    /// Destination rect in the framebuffer, before band clipping.
    dst_rect: DeviceIntRect,
    /// `dst_rect` clipped to the clip rect; compositing is restricted to this
    /// region, which `process` splits into horizontal bands.
    clipped_dst: DeviceIntRect,
    /// Whether the source is opaque (forwarded to swgl's composite call).
    opaque: bool,
    /// Whether to mirror the source horizontally when compositing.
    flip_x: bool,
    /// Whether to mirror the source vertically when compositing.
    flip_y: bool,
    /// Sampling mode, mapped to NEAREST/LINEAR via image_rendering_to_gl_filter.
    filter: ImageRendering,
    /// The total number of bands for this job
    num_bands: u8,
    // The rounded clip that applies to this surface. All corners are zero if not used.
    rounded_clip: RoundedClip,
    /// Shared per-thread mask/indirection buffers used when applying the
    /// rounded clip.
    context: Arc<SwCompositeContext>,
}
    395 
impl SwCompositeJob {
    // Construct a mask for this job's rounded clip, that is stored in the
    // shared mask texture of the supplied composite context. The mask covers
    // the `band_clip` region; one byte of coverage per pixel.
    fn create_mask(
        &self,
        band_clip: &DeviceIntRect,
        ctx: &SwCompositeJobContext,
    ) {
        // The mask texture has a fixed size, so the masked region must fit in
        // it. `process` splits larger regions into tiles of at most this size.
        assert!(band_clip.width() <= INDIRECT_BUFFER_WIDTH);
        assert!(band_clip.height() <= INDIRECT_BUFFER_HEIGHT);

        // Write mask
        let (mask_pixels, mask_width, mask_height, _) = ctx.mask.get_buffer();
        // SAFETY: the mask texture was allocated as a single-channel R8 buffer
        // of mask_width * mask_height bytes and stays locked for the lifetime
        // of the job context, so the slice is in bounds. Exclusive mutable
        // access is assumed because each thread uses its own job context (see
        // SwCompositeContext::get_job_context) — confirm no two jobs share a
        // context concurrently.
        let mask_pixels = unsafe {
            std::slice::from_raw_parts_mut(
                mask_pixels as *mut u8,
                mask_width as usize * mask_height as usize,
            )
        };

        // Rounded rect SDF function taken from the existing WR mask shaders.
        // No doubt this could be done more efficiently, however it typically
        // is run on only a very small number of pixels, so it's unlikely to
        // show up in profiles.

        // Signed distance from `pos` (relative to the box center) to the edge
        // of a rounded box with the given half-size and per-corner radii.
        // Negative inside, positive outside. Note: callers here pass
        // center - pixel (reversed sign), which is why the corner selection
        // below appears mirrored relative to the usual convention.
        fn sd_round_box(
            pos: DevicePoint,
            half_box_size: DeviceSize,
            radii: &ClipRadius,
        ) -> f32 {
            let radius = if pos.x < 0.0 {
                if pos.y < 0.0 { radii.bottom_right } else { radii.top_right }
            } else {
                if pos.y < 0.0 { radii.bottom_left } else { radii.top_left }
            } as f32;

            let qx = pos.x.abs() - half_box_size.width + radius;
            let qy = pos.y.abs() - half_box_size.height + radius;

            let qxp = qx.max(0.0);
            let qyp = qy.max(0.0);

            let d1 = qx.max(qy).min(0.0);
            let d2 = ((qxp*qxp) + (qyp*qyp)).sqrt();

            d1 + d2 - radius
        }

        let half_clip_box_size = self.rounded_clip.rect.size().to_f32() * 0.5;

        // Evaluate the SDF at each pixel center of the mask buffer. Pixels
        // beyond the band_clip extents are written too but never read, since
        // the indirect composite only covers the band_clip-sized region.
        for y in 0 .. mask_height {
            let py = band_clip.min.y + y;

            for x in 0 .. mask_width {
                let px = band_clip.min.x + x;

                // Vector from the pixel center (hence the -0.5 offsets) to the
                // center of the clip rect.
                let pos = DevicePoint::new(
                    -0.5 + self.rounded_clip.rect.min.x as f32 + half_clip_box_size.width - px as f32,
                    -0.5 + self.rounded_clip.rect.min.y as f32 + half_clip_box_size.height - py as f32,
                );

                let i = (y * mask_width + x) as usize;
                let d = sd_round_box(
                    pos,
                    half_clip_box_size,
                    &self.rounded_clip.radii,
                );

                // Convert signed distance to approximate coverage in [0, 1].
                let d = (0.5 - d).min(1.0).max(0.0);
                mask_pixels[i] = (d * 255.0) as u8;
            }
        }
    }

    // Composite `band_clip` region for the given source (RGBA or YUV), optionally
    // using an indirection buffer and applying the current alpha mask.
    // When `use_indirect` is set, the source is first composited into the
    // context's indirect buffer at origin (0, 0), the mask (written by
    // create_mask) is applied to it, and the result is then copied to the
    // destination framebuffer at `band_clip`.
    fn composite_rect(
        &self,
        band_clip: &DeviceIntRect,
        use_indirect: bool,
        ctx: &SwCompositeJobContext,
    ) {
        match self.locked_src {
            SwCompositeSource::BGRA(ref resource) => {
                if use_indirect {
                    // Copy tile into temporary buffer. The dest offsets are
                    // shifted by -band_clip.min so the band lands at the
                    // indirect buffer's origin.
                    ctx.indirect.composite(
                        resource,

                        self.src_rect.min.x,
                        self.src_rect.min.y,
                        self.src_rect.width(),
                        self.src_rect.height(),

                        -band_clip.min.x + self.dst_rect.min.x,
                        -band_clip.min.y + self.dst_rect.min.y,
                        self.dst_rect.width(),
                        self.dst_rect.height(),

                        true,
                        self.flip_x,
                        self.flip_y,
                        image_rendering_to_gl_filter(self.filter),

                        0,
                        0,
                        band_clip.width(),
                        band_clip.height(),
                    );

                    // Apply the mask
                    ctx.indirect.apply_mask(&ctx.mask);

                    // Composite indirect buffer to frame buffer
                    self.locked_dst.composite(
                        &ctx.indirect,

                        0,
                        0,
                        band_clip.width(),
                        band_clip.height(),

                        band_clip.min.x,
                        band_clip.min.y,
                        band_clip.width(),
                        band_clip.height(),

                        false,
                        false,
                        false,
                        gl::NEAREST,

                        band_clip.min.x,
                        band_clip.min.y,
                        band_clip.width(),
                        band_clip.height(),
                    );
                } else {
                    // Direct path: composite straight into the framebuffer,
                    // clipped to the band.
                    self.locked_dst.composite(
                        resource,
                        self.src_rect.min.x,
                        self.src_rect.min.y,
                        self.src_rect.width(),
                        self.src_rect.height(),
                        self.dst_rect.min.x,
                        self.dst_rect.min.y,
                        self.dst_rect.width(),
                        self.dst_rect.height(),
                        self.opaque,
                        self.flip_x,
                        self.flip_y,
                        image_rendering_to_gl_filter(self.filter),
                        band_clip.min.x,
                        band_clip.min.y,
                        band_clip.width(),
                        band_clip.height(),
                    );
                }
            }
            SwCompositeSource::YUV(ref y, ref u, ref v, color_space, color_depth) => {
                // Translate the WR color space enum to swgl's equivalent.
                let swgl_color_space = match color_space {
                    YuvRangedColorSpace::Rec601Narrow => swgl::YuvRangedColorSpace::Rec601Narrow,
                    YuvRangedColorSpace::Rec601Full => swgl::YuvRangedColorSpace::Rec601Full,
                    YuvRangedColorSpace::Rec709Narrow => swgl::YuvRangedColorSpace::Rec709Narrow,
                    YuvRangedColorSpace::Rec709Full => swgl::YuvRangedColorSpace::Rec709Full,
                    YuvRangedColorSpace::Rec2020Narrow => swgl::YuvRangedColorSpace::Rec2020Narrow,
                    YuvRangedColorSpace::Rec2020Full => swgl::YuvRangedColorSpace::Rec2020Full,
                    YuvRangedColorSpace::GbrIdentity => swgl::YuvRangedColorSpace::GbrIdentity,
                };
                if use_indirect {
                    // Copy tile into temporary buffer (same origin shift as the
                    // BGRA indirect path above).
                    ctx.indirect.composite_yuv(
                        y,
                        u,
                        v,
                        swgl_color_space,
                        color_depth.bit_depth(),

                        self.src_rect.min.x,
                        self.src_rect.min.y,
                        self.src_rect.width(),
                        self.src_rect.height(),

                        -band_clip.min.x + self.dst_rect.min.x,
                        -band_clip.min.y + self.dst_rect.min.y,
                        self.dst_rect.width(),
                        self.dst_rect.height(),

                        self.flip_x,
                        self.flip_y,

                        0,
                        0,
                        band_clip.width(),
                        band_clip.height(),
                    );

                    // Apply the mask
                    ctx.indirect.apply_mask(&ctx.mask);

                    // Composite indirect buffer to frame buffer
                    self.locked_dst.composite(
                        &ctx.indirect,

                        0,
                        0,
                        band_clip.width(),
                        band_clip.height(),

                        band_clip.min.x,
                        band_clip.min.y,
                        band_clip.width(),
                        band_clip.height(),

                        false,
                        false,
                        false,
                        gl::NEAREST,

                        band_clip.min.x,
                        band_clip.min.y,
                        band_clip.width(),
                        band_clip.height(),
                    );
                } else {
                    // Direct YUV path, clipped to the band.
                    self.locked_dst.composite_yuv(
                        y,
                        u,
                        v,
                        swgl_color_space,
                        color_depth.bit_depth(),
                        self.src_rect.min.x,
                        self.src_rect.min.y,
                        self.src_rect.width(),
                        self.src_rect.height(),
                        self.dst_rect.min.x,
                        self.dst_rect.min.y,
                        self.dst_rect.width(),
                        self.dst_rect.height(),
                        self.flip_x,
                        self.flip_y,
                        band_clip.min.x,
                        band_clip.min.y,
                        band_clip.width(),
                        band_clip.height(),
                    );
                }
            }
        }
    }

    /// Process a composite job: composite the band identified by `band_index`,
    /// applying the rounded-rect mask where the band intersects it.
    fn process(
        &self,
        band_index: i32,
        is_composite_thread: bool,
    ) {
        // Retrieve the correct context buffers depending on which thread we're on
        let ctx = self.context.get_job_context(is_composite_thread);

        // Bands are allocated in reverse order, but we want to process them in increasing order.
        let num_bands = self.num_bands as i32;
        let band_index = num_bands - 1 - band_index;
        // Calculate the Y extents for the job's band, starting at the current index and spanning to
        // the following index.
        let band_offset = (self.clipped_dst.height() * band_index) / num_bands;
        let band_height = (self.clipped_dst.height() * (band_index + 1)) / num_bands - band_offset;
        // Create a rect that is the intersection of the band with the clipped dest
        let band_clip = DeviceIntRect::from_origin_and_size(
            DeviceIntPoint::new(self.clipped_dst.min.x, self.clipped_dst.min.y + band_offset),
            DeviceIntSize::new(self.clipped_dst.width(), band_height),
        );

        // If this band region is affected by a rounded rect clip, apply an alpha mask during compositing

        if self.rounded_clip.affects_rect(&band_clip) {
            // The job context allocates a small fixed size buffer for indirections, so split this band
            // in to a number of tiles that can be individually processed.
            // NOTE(review): create_mask asserts each tile fits the fixed-size
            // buffers, which also assumes bands are at most
            // INDIRECT_BUFFER_HEIGHT tall — presumably guaranteed by whoever
            // picks num_bands; confirm.

            let num_x_tiles = (self.clipped_dst.width() + INDIRECT_BUFFER_WIDTH-1) / INDIRECT_BUFFER_WIDTH;

            for x in 0 .. num_x_tiles {
                let x_offset = (self.clipped_dst.width() * x) / num_x_tiles;
                let tile_width = (self.clipped_dst.width() * (x + 1)) / num_x_tiles - x_offset;

                let tile_rect = DeviceIntRect::from_origin_and_size(
                    DeviceIntPoint::new(
                        self.clipped_dst.min.x + x_offset,
                        self.clipped_dst.min.y + band_offset,
                    ),
                    DeviceIntSize::new(
                        tile_width,
                        band_height,
                    ),
                );

                // Check if each individual tile within the band is affected by the clip, and
                // skip indirect buffer (and mask creation) where possible.

                let use_indirect = self.rounded_clip.affects_rect(&tile_rect);

                if use_indirect {
                    self.create_mask(&tile_rect, ctx);
                }

                self.composite_rect(
                    &tile_rect,
                    use_indirect,
                    ctx,
                );
            }
        } else {
            // Simple (direct) composite path if no rounded clip
            self.composite_rect(
                &band_clip,
                false,
                ctx,
            );
        }
    }
}
    717 
/// A reference to a SwCompositeGraph node that can be passed from the render
/// thread to the SwComposite thread. Consistency of mutation is ensured in
/// SwCompositeGraphNode via use of Atomic operations that prevent more than
/// one thread from mutating SwCompositeGraphNode at once. This avoids using
/// messy and not-thread-safe RefCells or expensive Mutexes inside the graph
/// node and at least signals to the compiler that potentially unsafe coercions
/// are occurring.
#[derive(Clone)]
// Cloning bumps the Arc refcount; all clones alias the same UnsafeCell-wrapped node.
struct SwCompositeGraphNodeRef(Arc<UnsafeCell<SwCompositeGraphNode>>);
    727 
impl SwCompositeGraphNodeRef {
   /// Wrap a freshly constructed graph node in a shareable ref.
   fn new(graph_node: SwCompositeGraphNode) -> Self {
       SwCompositeGraphNodeRef(Arc::new(UnsafeCell::new(graph_node)))
   }

   /// Borrow the node immutably.
   // SAFETY: callers must uphold the cross-thread mutation discipline
   // described on SwCompositeGraphNodeRef; aliasing is not checked here.
   fn get(&self) -> &SwCompositeGraphNode {
       unsafe { &*self.0.get() }
   }

   /// Borrow the node mutably through a shared ref.
   // SAFETY: exclusivity is enforced externally by the node's atomic
   // parent/band counters, not by the borrow checker.
   fn get_mut(&self) -> &mut SwCompositeGraphNode {
       unsafe { &mut *self.0.get() }
   }

   /// Raw pointer to the node, used to cache it in SwCompositeThread's
   /// `current_job` AtomicPtr.
   fn get_ptr_mut(&self) -> *mut SwCompositeGraphNode {
       self.0.get()
   }
}
    745 
// SAFETY: cross-thread access to the wrapped SwCompositeGraphNode is
// serialized by the atomic state inside the node itself (see the comment
// on SwCompositeGraphNodeRef above).
unsafe impl Send for SwCompositeGraphNodeRef {}
    747 
// Convenience: allow `node_ref.method()` in place of `node_ref.get().method()`.
impl Deref for SwCompositeGraphNodeRef {
   type Target = SwCompositeGraphNode;

   fn deref(&self) -> &Self::Target {
       self.get()
   }
}
    755 
// Mutable counterpart; inherits the same externally-enforced exclusivity
// requirements as `get_mut`.
impl DerefMut for SwCompositeGraphNodeRef {
   fn deref_mut(&mut self) -> &mut Self::Target {
       self.get_mut()
   }
}
    761 
/// Dependency graph of composite jobs to be completed. Keeps a list of child jobs that are dependent on the completion of this job.
/// Also keeps track of the number of parent jobs that this job is dependent upon before it can be processed. Once there are no more
/// in-flight parent jobs that it depends on, the graph node is finally added to the job queue for processing.
///
/// Nodes are always shared and accessed through SwCompositeGraphNodeRef.
struct SwCompositeGraphNode {
   /// Job to be queued for this graph node once ready.
   job: Option<SwCompositeJob>,
   /// Whether there is a job that requires processing.
   has_job: AtomicBool,
   /// The number of remaining bands associated with this job. When this is
   /// non-zero and the node has no more parents left, then the node is being
   /// actively used by the composite thread to process jobs. Once it hits
   /// zero, the owning thread (which brought it to zero) can safely retire
   /// the node as no other thread is using it.
   remaining_bands: AtomicU8,
   /// The number of bands that are available for processing. Signed so that
   /// racing decrements in take_band may drive it below zero harmlessly.
   available_bands: AtomicI8,
   /// Count of parents this graph node depends on. While this is non-zero the
   /// node must ensure that it is only being actively mutated by the render
   /// thread and otherwise never being accessed by the render thread.
   parents: AtomicU32,
   /// Graph nodes of child jobs that are dependent on this job
   children: Vec<SwCompositeGraphNodeRef>,
}
    785 
// SAFETY: concurrent access is coordinated via the node's atomic counters;
// the non-atomic `job`/`children` fields are only touched by the thread that
// currently owns the node per those counters.
unsafe impl Sync for SwCompositeGraphNode {}
    787 
impl SwCompositeGraphNode {
   /// Allocate a fresh graph node wrapped in a shareable ref.
   fn new() -> SwCompositeGraphNodeRef {
       SwCompositeGraphNodeRef::new(SwCompositeGraphNode {
           job: None,
           has_job: AtomicBool::new(false),
           remaining_bands: AtomicU8::new(0),
           available_bands: AtomicI8::new(0),
           parents: AtomicU32::new(0),
           children: Vec::new(),
       })
   }

   /// Reset the node's state for a new frame
   fn reset(&mut self) {
       self.job = None;
       self.has_job.store(false, Ordering::SeqCst);
       self.remaining_bands.store(0, Ordering::SeqCst);
       self.available_bands.store(0, Ordering::SeqCst);
       // Initialize parents to 1 as sentinel dependency for uninitialized job
       // to avoid queuing uninitialized job as unblocked child dependency.
       self.parents.store(1, Ordering::SeqCst);
       self.children.clear();
   }

   /// Add a dependent child node to dependency list. Update its parent count.
   fn add_child(&mut self, child: SwCompositeGraphNodeRef) {
       child.parents.fetch_add(1, Ordering::SeqCst);
       self.children.push(child);
   }

   /// Install a job for this node. Return whether or not the job has any unprocessed parents
   /// that would block immediate composition.
   fn set_job(&mut self, job: SwCompositeJob, num_bands: u8) -> bool {
       self.job = Some(job);
       self.has_job.store(true, Ordering::SeqCst);
       self.remaining_bands.store(num_bands, Ordering::SeqCst);
       self.available_bands.store(num_bands as _, Ordering::SeqCst);
       // Subtract off the sentinel parent dependency now that job is initialized and check
       // whether there are any remaining parent dependencies to see if this job is ready.
       self.parents.fetch_sub(1, Ordering::SeqCst) <= 1
   }

   /// Take an available band if possible. Also return whether there are no more bands left
   /// so the caller may properly clean up after.
   fn take_band(&self) -> (Option<i32>, bool) {
       // The decrement is unconditional, so racing callers may drive
       // available_bands negative; that is why it is a signed AtomicI8.
       let available = self.available_bands.fetch_sub(1, Ordering::SeqCst);
       if available > 0 {
           (Some(available as i32 - 1), available == 1)
       } else {
           (None, true)
       }
   }

   /// Try to take the job from this node for processing and then process it within the current band.
   fn process_job(
       &self,
       band_index: i32,
       is_composite_thread: bool,
   ) {
       if let Some(ref job) = self.job {
           job.process(band_index, is_composite_thread);
       }
   }

   /// After processing a band, check all child dependencies and remove this parent from
   /// their dependency counts. If applicable, queue the new child bands for composition.
   fn unblock_children(&mut self, thread: &SwCompositeThread) {
       // Only the thread that retires the final band proceeds; earlier
       // bands return immediately without touching children.
       if self.remaining_bands.fetch_sub(1, Ordering::SeqCst) > 1 {
           return;
       }
       // Clear the job to release any locked resources.
       self.job = None;
       // Signal that resources have been released.
       self.has_job.store(false, Ordering::SeqCst);
       // Acquire the queue lock lazily: only if some child actually becomes
       // ready to be queued.
       let mut lock = None;
       for child in self.children.drain(..) {
           // Remove the child's parent dependency on this node. If there are no more
           // parent dependencies left, send the child job bands for composition.
           if child.parents.fetch_sub(1, Ordering::SeqCst) <= 1 {
               if lock.is_none() {
                   lock = Some(thread.lock());
               }
               thread.send_job(lock.as_mut().unwrap(), child);
           }
       }
   }
}
    875 
/// The SwComposite thread processes a queue of composite jobs, also signaling
/// via a condition when all available jobs have been processed, as tracked by
/// the job count.
struct SwCompositeThread {
   /// Queue of available composite jobs
   jobs: Mutex<SwCompositeJobQueue>,
   /// Cache of the current job being processed. This maintains a pointer to
   /// the contents of the SwCompositeGraphNodeRef, which is safe due to the
   /// fact that SwCompositor maintains a strong reference to the contents
   /// in an SwTile to keep it alive while this is in use.
   current_job: AtomicPtr<SwCompositeGraphNode>,
   /// Condition signaled when either there are jobs available to process or
   /// there are no more jobs left to process. Otherwise stated, this signals
   /// when the job queue transitions from an empty to non-empty state or from
   /// a non-empty to empty state.
   jobs_available: Condvar,
   /// Whether all available jobs have been processed.
   jobs_completed: AtomicBool,
   /// Whether the main thread is waiting for job completion.
   waiting_for_jobs: AtomicBool,
   /// Whether the SwCompositor is shutting down
   shutting_down: AtomicBool,
}
    899 
/// The SwCompositeThread struct is shared between the SwComposite thread
/// and the rendering thread so that both ends can access the job queue.
// SAFETY: shared state is mediated by the Mutex-guarded job queue and the
// atomic fields; the raw pointer cached in `current_job` is kept alive by
// SwCompositor holding a strong reference (see the `current_job` field doc).
unsafe impl Sync for SwCompositeThread {}
    903 
/// A FIFO queue of composite jobs to be processed. Jobs are pushed at the
/// back (send_job) and popped from the front (take_job).
type SwCompositeJobQueue = VecDeque<SwCompositeGraphNodeRef>;

/// Locked access to the composite job queue.
type SwCompositeThreadLock<'a> = MutexGuard<'a, SwCompositeJobQueue>;
    909 
impl SwCompositeThread {
   /// Spawn the SwComposite worker thread and return a shared handle to it.
   /// The spawned thread loops pulling bands of work from the job queue
   /// until take_job returns None after deinit() signals shutdown.
   fn new() -> Arc<SwCompositeThread> {
       let info = Arc::new(SwCompositeThread {
           jobs: Mutex::new(SwCompositeJobQueue::new()),
           current_job: AtomicPtr::new(ptr::null_mut()),
           jobs_available: Condvar::new(),
           jobs_completed: AtomicBool::new(true),
           waiting_for_jobs: AtomicBool::new(false),
           shutting_down: AtomicBool::new(false),
       });
       let result = info.clone();
       let thread_name = "SwComposite";
       thread::Builder::new()
           .name(thread_name.into())
           // The composite thread only calls into SWGL to composite, and we
           // have potentially many composite threads for different windows,
           // so using the default stack size is excessive. A reasonably small
           // stack size should be more than enough for SWGL and reduce memory
           // overhead.
           // Bug 1731569 - Need at least 36K to avoid problems with ASAN.
           .stack_size(40 * 1024)
           .spawn(move || {
               profiler::register_thread(thread_name);
               // Process any available jobs. take_job returns None once
               // shutdown has been signaled, causing the thread to exit.
               while let Some((job, band)) = info.take_job(true) {
                   info.process_job(job, band, true);
               }
               profiler::unregister_thread();
           })
           .expect("Failed creating SwComposite thread");
       result
   }

   /// Signal the worker thread to exit, waking it if it is blocked waiting
   /// for new jobs.
   fn deinit(&self) {
       // Signal that the thread needs to exit.
       self.shutting_down.store(true, Ordering::SeqCst);
       // Wake up the thread in case it is blocked waiting for new jobs
       self.jobs_available.notify_all();
   }

   /// Process a job contained in a dependency graph node received from the job queue.
   /// Any child dependencies will be unblocked as appropriate after processing. The
   /// job count will be updated to reflect this.
   fn process_job(
       &self,
       graph_node: &mut SwCompositeGraphNode,
       band: i32,
       is_composite_thread: bool,
   ) {
       // Do the actual processing of the job contained in this node.
       graph_node.process_job(band, is_composite_thread);
       // Unblock any child dependencies now that this job has been processed.
       graph_node.unblock_children(self);
   }

   /// Queue a tile for composition by adding to the queue and increasing the job count.
   fn queue_composite(
       &self,
       locked_src: SwCompositeSource,
       locked_dst: swgl::LockedResource,
       src_rect: DeviceIntRect,
       dst_rect: DeviceIntRect,
       clipped_dst: DeviceIntRect,
       rounded_clip: RoundedClip,
       opaque: bool,
       flip_x: bool,
       flip_y: bool,
       filter: ImageRendering,
       num_bands: u8,
       mut graph_node: SwCompositeGraphNodeRef,
       job_queue: &mut SwCompositeJobQueue,
       context: Arc<SwCompositeContext>,
   ) {
       let job = SwCompositeJob {
           locked_src,
           locked_dst,
           src_rect,
           dst_rect,
           clipped_dst,
           opaque,
           flip_x,
           flip_y,
           filter,
           num_bands,
           rounded_clip,
           context,
       };
       // Only send the job now if it has no outstanding parent dependencies;
       // otherwise unblock_children will send it once the last parent is done.
       if graph_node.set_job(job, num_bands) {
           self.send_job(job_queue, graph_node);
       }
   }

   fn prepare_for_composites(&self) {
       // Initially, the job queue is empty. Trivially, this means we consider all
       // jobs queued so far as completed.
       self.jobs_completed.store(true, Ordering::SeqCst);
   }

   /// Lock the thread for access to the job queue.
   fn lock(&self) -> SwCompositeThreadLock {
       self.jobs.lock().unwrap()
   }

   /// Send a job to the composite thread by adding it to the job queue.
   /// Signal that this job has been added in case the queue was empty and the
   /// SwComposite thread is waiting for jobs.
   fn send_job(&self, queue: &mut SwCompositeJobQueue, job: SwCompositeGraphNodeRef) {
       if queue.is_empty() {
           self.jobs_completed.store(false, Ordering::SeqCst);
           self.jobs_available.notify_all();
       }
       queue.push_back(job);
   }

   /// Try to get a band of work from the currently cached job when available.
   /// If there is a job, but it has no available bands left, null out the job
   /// so that other threads do not bother checking the job.
   fn try_take_job(&self) -> Option<(&mut SwCompositeGraphNode, i32)> {
       let current_job_ptr = self.current_job.load(Ordering::SeqCst);
       if let Some(current_job) = unsafe { current_job_ptr.as_mut() } {
           let (band, done) = current_job.take_band();
           if done {
               // Another thread may have installed a new job meanwhile, so
               // only clear the cache if it still points at this exhausted job.
               let _ = self.current_job.compare_exchange(
                   current_job_ptr,
                   ptr::null_mut(),
                   Ordering::SeqCst,
                   Ordering::SeqCst,
               );
           }
           if let Some(band) = band {
               return Some((current_job, band));
           }
       }
       return None;
   }

   /// Take a job from the queue. Optionally block waiting for jobs to become
   /// available if this is called from the SwComposite thread.
   fn take_job(&self, wait: bool) -> Option<(&mut SwCompositeGraphNode, i32)> {
       // First try checking the cached job outside the scope of the mutex.
       // For jobs that have multiple bands, this allows us to avoid having
       // to lock the mutex multiple times to check the job for each band.
       if let Some((job, band)) = self.try_take_job() {
           return Some((job, band));
       }
       // Lock the job queue while checking for available jobs. The lock
       // won't be held while the job is processed later outside of this
       // function so that other threads can pull from the queue meanwhile.
       let mut jobs = self.lock();
       loop {
           // While inside the mutex, check the cached job again to see if it
           // has been updated.
           if let Some((job, band)) = self.try_take_job() {
               return Some((job, band));
           }
           // If no cached job was available, try to take a job from the queue
           // and install it as the current job.
           if let Some(job) = jobs.pop_front() {
               self.current_job.store(job.get_ptr_mut(), Ordering::SeqCst);
               continue;
           }
           // Otherwise, the job queue is currently empty. Depending on the
           // job status, we may either wait for jobs to become available or exit.
           if wait {
               // For the SwComposite thread, if we arrive here, the job queue
               // is empty. Signal that all available jobs have been completed.
               self.jobs_completed.store(true, Ordering::SeqCst);
               if self.waiting_for_jobs.load(Ordering::SeqCst) {
                   // Wake the main thread if it is waiting for a change in job status.
                   self.jobs_available.notify_all();
               } else if self.shutting_down.load(Ordering::SeqCst) {
                   // If SwComposite thread needs to shut down, then exit and stop
                   // waiting for jobs.
                   return None;
               }
           } else {
               // If all available jobs have been completed by the SwComposite
               // thread, then the main thread no longer needs to wait for any
               // new jobs to appear in the queue and should exit.
               if self.jobs_completed.load(Ordering::SeqCst) {
                   return None;
               }
               // Otherwise, signal that the main thread is waiting for jobs.
               self.waiting_for_jobs.store(true, Ordering::SeqCst);
           }
           // Wait until jobs are added before checking the job queue again.
           jobs = self.jobs_available.wait(jobs).unwrap();
           if !wait {
               // The main thread is done waiting for jobs.
               self.waiting_for_jobs.store(false, Ordering::SeqCst);
           }
       }
   }

   /// Wait for all queued composition jobs to be processed.
   /// Instead of blocking on the SwComposite thread to complete all jobs,
   /// this may steal some jobs and attempt to process them while waiting.
   /// This may optionally process jobs synchronously. When normally doing
   /// asynchronous processing, the graph dependencies are relied upon to
   /// properly order the jobs, which makes it safe for the render thread
   /// to steal jobs from the composite thread without violating those
   /// dependencies. Synchronous processing just disables this job stealing
   /// so that the composite thread always handles the jobs in the order
   /// they were queued without having to rely upon possibly unavailable
   /// graph dependencies.
   fn wait_for_composites(&self, sync: bool) {
       // If processing asynchronously, try to steal jobs from the composite
       // thread if it is busy.
       if !sync {
           while let Some((job, band)) = self.take_job(false) {
               self.process_job(job, band, false);
           }
           // Once there are no more jobs, just fall through to waiting
           // synchronously for the composite thread to finish processing.
       }
       // If processing synchronously, just wait for the composite thread
       // to complete processing any in-flight jobs, then bail.
       let mut jobs = self.lock();
       // Signal that the main thread may wait for job completion so that the
       // SwComposite thread can wake it up if necessary.
       self.waiting_for_jobs.store(true, Ordering::SeqCst);
       // Wait for job completion to ensure there are no more in-flight jobs.
       while !self.jobs_completed.load(Ordering::SeqCst) {
           jobs = self.jobs_available.wait(jobs).unwrap();
       }
       // Done waiting for job completion.
       self.waiting_for_jobs.store(false, Ordering::SeqCst);
   }
}
   1143 
/// Parameters describing how to composite a surface within a frame:
/// the surface's id, its device-space transform, the clip rect applied
/// when compositing, and the image filter used for sampling.
type FrameSurface = (
   NativeSurfaceId,
   CompositorSurfaceTransform,
   DeviceIntRect,
   ImageRendering,
);
   1151 
/// Adapter for RenderCompositors to work with SWGL that shuttles between
/// WebRender and the RenderCompositor via the Compositor API.
pub struct SwCompositor {
   /// The SWGL context used for rendering and compositing.
   gl: swgl::Context,
   /// The backing MappableCompositor implementation being adapted.
   compositor: Box<dyn MappableCompositor>,
   /// Whether compositing is delegated to a native render compositor rather
   /// than handled by the SwComposite thread.
   use_native_compositor: bool,
   /// All currently allocated surfaces, keyed by native surface id.
   surfaces: HashMap<NativeSurfaceId, SwSurface>,
   /// Surfaces queued for composition in the current frame, in paint order.
   frame_surfaces: Vec<FrameSurface>,
   /// Any surface added after we're already compositing (i.e. debug overlay)
   /// needs to be processed after those frame surfaces. For simplicity we
   /// store them in a separate queue that gets processed later.
   late_surfaces: Vec<FrameSurface>,
   /// Any composite surfaces that were locked during the frame and need to be
   /// unlocked. frame_surfaces and late_surfaces may be pruned, so we can't
   /// rely on them to contain all surfaces that were actually locked and must
   /// track those separately.
   composite_surfaces: HashMap<ExternalImageId, SWGLCompositeSurfaceInfo>,
   cur_tile: NativeTileId,
   /// The maximum tile size required for any of the allocated surfaces.
   max_tile_size: DeviceIntSize,
   /// Reuse the same depth texture amongst all tiles in all surfaces.
   /// This depth texture must be big enough to accommodate the largest used
   /// tile size for any surface. The maximum requested tile size is tracked
   /// to ensure that this depth texture is at least that big.
   /// This is initialized when the first surface is created and freed when
   /// the last surface is destroyed, to ensure compositors with no surfaces
   /// are not holding on to extra memory.
   depth_id: Option<u32>,
   /// Instance of the SwComposite thread, only created if we are not relying
   /// on a native RenderCompositor.
   composite_thread: Option<Arc<SwCompositeThread>>,
   /// SWGL locked resource for sharing framebuffer with SwComposite thread
   locked_framebuffer: Option<swgl::LockedResource>,
   /// Per-thread buffers used for rendering masks and indirection buffers
   composite_context: Option<Arc<SwCompositeContext>>,
   /// Whether we are currently in the middle of compositing
   is_compositing: bool,
}
   1190 
   1191 impl SwCompositor {
   1192    pub fn new(
   1193        gl: swgl::Context,
   1194        compositor: Box<dyn MappableCompositor>,
   1195        use_native_compositor: bool,
   1196    ) -> Self {
   1197        // Only create the SwComposite thread if we're not using a native render
   1198        // compositor. Thus, we are compositing into the main software framebuffer,
   1199        // which benefits from compositing asynchronously while updating tiles.
   1200        let (composite_thread, composite_context) = if !use_native_compositor {
   1201            (
   1202                Some(SwCompositeThread::new()),
   1203                Some(Arc::new(SwCompositeContext::new(&gl)))
   1204            )
   1205        } else {
   1206            (
   1207                None,
   1208                None,
   1209            )
   1210        };
   1211        SwCompositor {
   1212            gl,
   1213            compositor,
   1214            use_native_compositor,
   1215            surfaces: HashMap::new(),
   1216            frame_surfaces: Vec::new(),
   1217            late_surfaces: Vec::new(),
   1218            composite_surfaces: HashMap::new(),
   1219            cur_tile: NativeTileId {
   1220                surface_id: NativeSurfaceId(0),
   1221                x: 0,
   1222                y: 0,
   1223            },
   1224            max_tile_size: DeviceIntSize::zero(),
   1225            depth_id: None,
   1226            composite_thread,
   1227            locked_framebuffer: None,
   1228            composite_context,
   1229            is_compositing: false,
   1230        }
   1231    }
   1232 
   /// Free the GL framebuffer and color texture owned by the given tile.
   fn deinit_tile(&self, tile: &SwTile) {
       self.gl.delete_framebuffers(&[tile.fbo_id]);
       self.gl.delete_textures(&[tile.color_id]);
   }
   1237 
   1238    fn deinit_surface(&self, surface: &SwSurface) {
   1239        for tile in &surface.tiles {
   1240            self.deinit_tile(tile);
   1241        }
   1242    }
   1243 
   /// Attempt to occlude any queued surfaces with an opaque occluder rect. If
   /// an existing surface is occluded, we attempt to restrict its clip rect
   /// so long as it can remain a single clip rect. Existing frame surfaces
   /// that are opaque will be fused if possible with the supplied occluder
   /// rect to further try and restrict any underlying surfaces.
   fn occlude_surfaces(&mut self) {
       // Check if inner rect is fully included in outer rect
       fn includes(outer: &Range<i32>, inner: &Range<i32>) -> bool {
           outer.start <= inner.start && outer.end >= inner.end
       }

       // Check if outer range overlaps either the start or end of a range. If
       // there is overlap, return the portion of the inner range remaining
       // after the overlap has been removed.
       fn overlaps(outer: &Range<i32>, inner: &Range<i32>) -> Option<Range<i32>> {
           if outer.start <= inner.start && outer.end >= inner.start {
               Some(outer.end..inner.end.max(outer.end))
           } else if outer.start <= inner.end && outer.end >= inner.end {
               Some(inner.start..outer.start.max(inner.start))
           } else {
               None
           }
       }

       // Overwrite the rect's horizontal interval with the given range.
       fn set_x_range(rect: &mut DeviceIntRect, range: &Range<i32>) {
           rect.min.x = range.start;
           rect.max.x = range.end;
       }

       // Overwrite the rect's vertical interval with the given range.
       fn set_y_range(rect: &mut DeviceIntRect, range: &Range<i32>) {
           rect.min.y = range.start;
           rect.max.y = range.end;
       }

       // Smallest range containing both input ranges.
       fn union(base: Range<i32>, extra: Range<i32>) -> Range<i32> {
           base.start.min(extra.start)..base.end.max(extra.end)
       }

       // Ensure an occluder surface is both opaque and has all interior tiles.
       fn valid_occluder(surface: &SwSurface) -> bool {
           surface.is_opaque &&
           surface.has_all_tiles() &&
           // TODO(gw): Skipping an entire surface as an occluder when it has
           //           a rounded rect is probably too costly. May need to
           //           just skip tiles or bands from being added as occluders.
           !surface.rounded_clip.is_valid()
       }

       // Before we can try to occlude any surfaces, we need to fix their clip rects to tightly
       // bound the valid region. The clip rect might otherwise enclose an invalid area that
       // can't fully occlude anything even if the surface is opaque.
       for &mut (ref id, ref transform, ref mut clip_rect, _) in &mut self.frame_surfaces {
           if let Some(surface) = self.surfaces.get(id) {
               // Restrict the clip rect to fall within the valid region of the surface.
               *clip_rect = surface.device_bounds(transform, clip_rect).unwrap_or_default();
           }
       }

       // For each frame surface, treat it as an occluder if it is non-empty and opaque. Look
       // through the preceding surfaces to see if any can be occluded.
       for occlude_index in 0..self.frame_surfaces.len() {
           let (ref occlude_id, _, ref occlude_rect, _) = self.frame_surfaces[occlude_index];
           match self.surfaces.get(occlude_id) {
               Some(occluder) if valid_occluder(occluder) && !occlude_rect.is_empty() => {}
               _ => continue,
           }

           // Traverse the queued surfaces for this frame in the reverse order of
           // how they are composited, or rather, in order of visibility. For each
           // surface, check if the occluder can restrict the clip rect such that
           // the clip rect can remain a single rect. If the clip rect overlaps
           // the occluder on one axis interval while remaining fully included in
           // the occluder's other axis interval, then we can chop down the edge
           // of the clip rect on the overlapped axis. Further, if the surface is
           // opaque and its clip rect exactly matches the occluder rect on one
           // axis interval while overlapping on the other, fuse it with the
           // occluder rect before considering any underlying surfaces.
           let (mut occlude_x, mut occlude_y) = (occlude_rect.x_range(), occlude_rect.y_range());
           for &mut (ref id, _, ref mut clip_rect, _) in self.frame_surfaces[..occlude_index].iter_mut().rev() {
               if let Some(surface) = self.surfaces.get(id) {
                   let (clip_x, clip_y) = (clip_rect.x_range(), clip_rect.y_range());
                   if includes(&occlude_x, &clip_x) {
                       if let Some(visible) = overlaps(&occlude_y, &clip_y) {
                           set_y_range(clip_rect, &visible);
                           if occlude_x == clip_x && valid_occluder(surface) {
                               occlude_y = union(occlude_y, visible);
                           }
                       }
                   } else if includes(&occlude_y, &clip_y) {
                       if let Some(visible) = overlaps(&occlude_x, &clip_x) {
                           set_x_range(clip_rect, &visible);
                           if occlude_y == clip_y && valid_occluder(surface) {
                               occlude_x = union(occlude_x, visible);
                           }
                       }
                   }
               }
           }
       }
   }
   1344 
   1345    /// Reset tile dependency state for a new frame.
   1346    fn reset_overlaps(&mut self) {
   1347        for surface in self.surfaces.values_mut() {
   1348            for tile in &mut surface.tiles {
   1349                tile.overlaps.set(0);
   1350                tile.invalid.set(false);
   1351                tile.graph_node.reset();
   1352            }
   1353        }
   1354    }
   1355 
   1356    /// Computes an overlap count for a tile that falls within the given composite
   1357    /// destination rectangle. This requires checking all surfaces currently queued for
   1358    /// composition so far in this frame and seeing if they have any invalidated tiles
   1359    /// whose destination rectangles would also overlap the supplied tile. If so, then the
   1360    /// increment the overlap count to account for all such dependencies on invalid tiles.
   1361    /// Tiles with the same overlap count will still be drawn with a stable ordering in
   1362    /// the order the surfaces were queued, so it is safe to ignore other possible sources
   1363    /// of composition ordering dependencies, as the later queued tile will still be drawn
   1364    /// later than the blocking tiles within that stable order. We assume that the tile's
   1365    /// surface hasn't yet been added to the current frame list of surfaces to composite
   1366    /// so that we only process potential blockers from surfaces that would come earlier
   1367    /// in composition.
   1368    fn init_overlaps(
   1369        &self,
   1370        overlap_id: &NativeSurfaceId,
   1371        overlap_surface: &SwSurface,
   1372        overlap_tile: &SwTile,
   1373        overlap_transform: &CompositorSurfaceTransform,
   1374        overlap_clip_rect: &DeviceIntRect,
   1375    ) {
   1376        // Record an extra overlap for an invalid tile to track the tile's dependency
   1377        // on its own future update.
   1378        let mut overlaps = if overlap_tile.invalid.get() { 1 } else { 0 };
   1379 
   1380        let overlap_rect = match overlap_tile.overlap_rect(overlap_surface, overlap_transform, overlap_clip_rect) {
   1381            Some(overlap_rect) => overlap_rect,
   1382            None => {
   1383                overlap_tile.overlaps.set(overlaps);
   1384                return;
   1385            }
   1386        };
   1387 
   1388        for &(ref id, ref transform, ref clip_rect, _) in &self.frame_surfaces {
   1389            // We only want to consider surfaces that were added before the current one we're
   1390            // checking for overlaps. If we find that surface, then we're done.
   1391            if id == overlap_id {
   1392                break;
   1393            }
   1394            // If the surface's clip rect doesn't overlap the tile's rect,
   1395            // then there is no need to check any tiles within the surface.
   1396            if !overlap_rect.intersects(clip_rect) {
   1397                continue;
   1398            }
   1399            if let Some(surface) = self.surfaces.get(id) {
   1400                for tile in &surface.tiles {
   1401                    // If there is a deferred tile that might overlap the destination rectangle,
   1402                    // record the overlap.
   1403                    if tile.may_overlap(surface, transform, clip_rect, &overlap_rect) {
   1404                        if tile.overlaps.get() > 0 {
   1405                            overlaps += 1;
   1406                        }
   1407                        // Regardless of whether this tile is deferred, if it has dependency
   1408                        // overlaps, then record that it is potentially a dependency parent.
   1409                        tile.graph_node.get_mut().add_child(overlap_tile.graph_node.clone());
   1410                    }
   1411                }
   1412            }
   1413        }
   1414        if overlaps > 0 {
   1415            // Has a dependency on some invalid tiles, so need to defer composition.
   1416            overlap_tile.overlaps.set(overlaps);
   1417        }
   1418    }
   1419 
   /// Helper function that queues a composite job to the current locked framebuffer
   ///
   /// Locks the textures backing the tile — either the planes of an attached
   /// external image or the tile's own color texture — computes how many
   /// horizontal bands to split the job into, and hands it to the SwComposite
   /// thread. Bails out silently if any texture lock fails, if the external
   /// image was never locked for this frame, or if nothing is visible after
   /// clipping.
   fn queue_composite(
       &self,
       surface: &SwSurface,
       transform: &CompositorSurfaceTransform,
       clip_rect: &DeviceIntRect,
       filter: ImageRendering,
       tile: &SwTile,
       job_queue: &mut SwCompositeJobQueue,
   ) {
       // Jobs can only be queued when the SwComposite thread exists.
       if let Some(ref composite_thread) = self.composite_thread {
           if let Some((src_rect, dst_rect, flip_x, flip_y)) = tile.composite_rects(surface, transform, clip_rect) {
               let source = if let Some(ref external_image) = surface.external_image {
                   // If the surface has an attached external image, lock any textures supplied in the descriptor.
                   match self.composite_surfaces.get(external_image) {
                       Some(ref info) => match info.yuv_planes {
                           // Zero planes: a single interleaved BGRA texture.
                           0 => match self.gl.lock_texture(info.textures[0]) {
                               Some(texture) => SwCompositeSource::BGRA(texture),
                               None => return,
                           },
                           // Three planes: planar YUV; all three locks must succeed.
                           3 => match (
                               self.gl.lock_texture(info.textures[0]),
                               self.gl.lock_texture(info.textures[1]),
                               self.gl.lock_texture(info.textures[2]),
                           ) {
                               (Some(y_texture), Some(u_texture), Some(v_texture)) => SwCompositeSource::YUV(
                                   y_texture,
                                   u_texture,
                                   v_texture,
                                   info.color_space,
                                   info.color_depth,
                               ),
                               _ => return,
                           },
                           _ => panic!("unsupported number of YUV planes: {}", info.yuv_planes),
                       },
                       // External image was never locked this frame; skip the job.
                       None => return,
                   }
               } else if let Some(texture) = self.gl.lock_texture(tile.color_id) {
                   // Lock the texture representing the picture cache tile.
                   SwCompositeSource::BGRA(texture)
               } else {
                   return;
               };
               if let Some(ref framebuffer) = self.locked_framebuffer {
                   if let Some(clipped_dst) = dst_rect.intersection(clip_rect) {
                       let num_bands = if surface.rounded_clip.affects_rect(&clipped_dst) {
                           // Create enough bands that we won't exceed the height of the indirection buffer.
                           // NOTE(review): the `as u8` cast truncates if more than 255
                           // bands would be needed — presumably destination heights stay
                           // well below that; confirm against maximum framebuffer sizes.
                           ((clipped_dst.height() + INDIRECT_BUFFER_HEIGHT-1) / INDIRECT_BUFFER_HEIGHT) as u8
                       } else if clipped_dst.width() >= 64 && clipped_dst.height() >= 64 {
                           // For jobs that would span a sufficiently large destination rectangle, split
                           // it into multiple horizontal bands so that multiple threads can process them.
                           (clipped_dst.height() / 64).min(4) as u8
                       } else {
                           // Small jobs run as a single band.
                           1
                       };

                       composite_thread.queue_composite(
                           source,
                           framebuffer.clone(),
                           src_rect,
                           dst_rect,
                           clipped_dst,
                           surface.rounded_clip,
                           surface.is_opaque,
                           flip_x,
                           flip_y,
                           filter,
                           num_bands,
                           tile.graph_node.clone(),
                           job_queue,
                           self.composite_context.as_ref().expect("bug").clone(),
                       );
                   }
               }
           }
       }
   }
   1498 
   1499    /// Lock a surface with an attached external image for compositing.
   1500    fn try_lock_composite_surface(&mut self, device: &mut Device, id: &NativeSurfaceId) {
   1501        if let Some(surface) = self.surfaces.get_mut(id) {
   1502            if let Some(external_image) = surface.external_image {
   1503                assert!(!surface.tiles.is_empty());
   1504                let tile = &mut surface.tiles[0];
   1505                if let Some(info) = self.composite_surfaces.get(&external_image) {
   1506                    tile.valid_rect = DeviceIntRect::from_size(info.size);
   1507                    return;
   1508                }
   1509                // If the surface has an attached external image, attempt to lock the external image
   1510                // for compositing. Yields a descriptor of textures and data necessary for their
   1511                // interpretation on success.
   1512                let mut info = SWGLCompositeSurfaceInfo {
   1513                    yuv_planes: 0,
   1514                    textures: [0; 3],
   1515                    color_space: YuvRangedColorSpace::GbrIdentity,
   1516                    color_depth: ColorDepth::Color8,
   1517                    size: DeviceIntSize::zero(),
   1518                };
   1519                if self.compositor.lock_composite_surface(device, self.gl.into(), external_image, &mut info) {
   1520                    tile.valid_rect = DeviceIntRect::from_size(info.size);
   1521                    self.composite_surfaces.insert(external_image, info);
   1522                } else {
   1523                    tile.valid_rect = DeviceIntRect::zero();
   1524                }
   1525            }
   1526        }
   1527    }
   1528 
   1529    /// Look for any attached external images that have been locked and then unlock them.
   1530    fn unlock_composite_surfaces(&mut self, device: &mut Device) {
   1531        for &external_image in self.composite_surfaces.keys() {
   1532            self.compositor.unlock_composite_surface(device, self.gl.into(), external_image);
   1533        }
   1534        self.composite_surfaces.clear();
   1535    }
   1536 
   /// Issue composites for any tiles that are no longer blocked following a tile update.
   /// We process all surfaces and tiles in the order they were queued.
   ///
   /// First the updated tile itself is composited if its dependency count has
   /// dropped to zero, then every later surface in the frame list is scanned
   /// for tiles that were blocked on rects flushed so far, transitively
   /// unblocking and compositing them.
   fn flush_composites(&self, tile_id: &NativeTileId, surface: &SwSurface, tile: &SwTile) {
       // Without a composite thread there are no queued jobs to flush.
       let composite_thread = match &self.composite_thread {
           Some(composite_thread) => composite_thread,
           None => return,
       };

       // Look for the tile in the frame list and composite it if it has no dependencies.
       let mut frame_surfaces = self
           .frame_surfaces
           .iter()
           .skip_while(|&(ref id, _, _, _)| *id != tile_id.surface_id);
       let (overlap_rect, mut lock) = match frame_surfaces.next() {
           Some(&(_, ref transform, ref clip_rect, filter)) => {
               // Remove invalid tile's update dependency.
               if tile.invalid.get() {
                   tile.overlaps.set(tile.overlaps.get() - 1);
               }
               // If the tile still has overlaps, keep deferring it till later.
               if tile.overlaps.get() > 0 {
                   return;
               }
               // Otherwise, the tile's dependencies are all resolved, so composite it.
               let mut lock = composite_thread.lock();
               self.queue_composite(surface, transform, clip_rect, filter, tile, &mut lock);
               // Finally, get the tile's overlap rect used for tracking dependencies
               match tile.overlap_rect(surface, transform, clip_rect) {
                   Some(overlap_rect) => (overlap_rect, lock),
                   // No overlap rect means nothing downstream can depend on this tile.
                   None => return,
               }
           }
           // The tile's surface isn't in this frame's list at all; nothing to do.
           None => return,
       };

       // Accumulate rects whose dependencies have been satisfied from this update.
       // Store the union of all these bounds to quickly reject unaffected tiles.
       let mut flushed_bounds = overlap_rect;
       let mut flushed_rects = vec![overlap_rect];

       // Check surfaces following the update in the frame list and see if they would overlap it.
       for &(ref id, ref transform, ref clip_rect, filter) in frame_surfaces {
           // If the clip rect doesn't overlap the conservative bounds, we can skip the whole surface.
           if !flushed_bounds.intersects(clip_rect) {
               continue;
           }
           if let Some(surface) = self.surfaces.get(&id) {
               // Search through the surface's tiles for any blocked on this update and queue jobs for them.
               for tile in &surface.tiles {
                   let mut overlaps = tile.overlaps.get();
                   // Only check tiles that have existing unresolved dependencies
                   if overlaps == 0 {
                       continue;
                   }
                   // Get this tile's overlap rect for tracking dependencies
                   let overlap_rect = match tile.overlap_rect(surface, transform, clip_rect) {
                       Some(overlap_rect) => overlap_rect,
                       None => continue,
                   };
                   // Do a quick check to see if the tile overlaps the conservative bounds.
                   if !overlap_rect.intersects(&flushed_bounds) {
                       continue;
                   }
                   // Decrement the overlap count if this tile is dependent on any flushed rects.
                   for flushed_rect in &flushed_rects {
                       if overlap_rect.intersects(flushed_rect) {
                           overlaps -= 1;
                       }
                   }
                   if overlaps != tile.overlaps.get() {
                       // If the overlap count changed, this tile had a dependency on some flush rects.
                       // If the count hit zero, it is ready to composite.
                       tile.overlaps.set(overlaps);
                       if overlaps == 0 {
                           self.queue_composite(surface, transform, clip_rect, filter, tile, &mut lock);
                           // Record that the tile got flushed to update any downwind dependencies.
                           flushed_bounds = flushed_bounds.union(&overlap_rect);
                           flushed_rects.push(overlap_rect);
                       }
                   }
               }
           }
       }
   }
   1621 }
   1622 
   1623 impl Compositor for SwCompositor {
   1624    fn create_surface(
   1625        &mut self,
   1626        device: &mut Device,
   1627        id: NativeSurfaceId,
   1628        virtual_offset: DeviceIntPoint,
   1629        tile_size: DeviceIntSize,
   1630        is_opaque: bool,
   1631    ) {
   1632        if self.use_native_compositor {
   1633            self.compositor.create_surface(device, id, virtual_offset, tile_size, is_opaque);
   1634        }
   1635        self.max_tile_size = DeviceIntSize::new(
   1636            self.max_tile_size.width.max(tile_size.width),
   1637            self.max_tile_size.height.max(tile_size.height),
   1638        );
   1639        if self.depth_id.is_none() {
   1640            self.depth_id = Some(self.gl.gen_textures(1)[0]);
   1641        }
   1642        self.surfaces.insert(id, SwSurface::new(tile_size, is_opaque));
   1643    }
   1644 
   1645    fn create_external_surface(&mut self, device: &mut Device, id: NativeSurfaceId, is_opaque: bool) {
   1646        if self.use_native_compositor {
   1647            self.compositor.create_external_surface(device, id, is_opaque);
   1648        }
   1649        self.surfaces
   1650            .insert(id, SwSurface::new(DeviceIntSize::zero(), is_opaque));
   1651    }
   1652 
   /// Backdrop surfaces are not supported by this software compositor, so
   /// reaching this is a caller bug.
   fn create_backdrop_surface(&mut self, _device: &mut Device, _id: NativeSurfaceId, _color: ColorF) {
       unreachable!("Not implemented.")
   }
   1656 
   1657    fn destroy_surface(&mut self, device: &mut Device, id: NativeSurfaceId) {
   1658        if let Some(surface) = self.surfaces.remove(&id) {
   1659            self.deinit_surface(&surface);
   1660        }
   1661        if self.use_native_compositor {
   1662            self.compositor.destroy_surface(device, id);
   1663        }
   1664        if self.surfaces.is_empty() {
   1665            if let Some(depth_id) = self.depth_id.take() {
   1666                self.gl.delete_textures(&[depth_id]);
   1667            }
   1668        }
   1669    }
   1670 
   1671    fn deinit(&mut self, device: &mut Device) {
   1672        if let Some(ref composite_thread) = self.composite_thread {
   1673            composite_thread.deinit();
   1674        }
   1675 
   1676        // Ensure we drop the last remaining composite context so that the
   1677        // locked textures are dropped before we try to drop the SWGL context
   1678        // in the parent caller
   1679        self.composite_context = None;
   1680 
   1681        for surface in self.surfaces.values() {
   1682            self.deinit_surface(surface);
   1683        }
   1684 
   1685        if let Some(depth_id) = self.depth_id.take() {
   1686            self.gl.delete_textures(&[depth_id]);
   1687        }
   1688 
   1689        if self.use_native_compositor {
   1690            self.compositor.deinit(device);
   1691        }
   1692    }
   1693 
   1694    fn create_tile(&mut self, device: &mut Device, id: NativeTileId) {
   1695        if self.use_native_compositor {
   1696            self.compositor.create_tile(device, id);
   1697        }
   1698        if let Some(surface) = self.surfaces.get_mut(&id.surface_id) {
   1699            let mut tile = SwTile::new(id.x, id.y);
   1700            tile.color_id = self.gl.gen_textures(1)[0];
   1701            tile.fbo_id = self.gl.gen_framebuffers(1)[0];
   1702            let mut prev_fbo = [0];
   1703            unsafe {
   1704                self.gl.get_integer_v(gl::DRAW_FRAMEBUFFER_BINDING, &mut prev_fbo);
   1705            }
   1706            self.gl.bind_framebuffer(gl::DRAW_FRAMEBUFFER, tile.fbo_id);
   1707            self.gl.framebuffer_texture_2d(
   1708                gl::DRAW_FRAMEBUFFER,
   1709                gl::COLOR_ATTACHMENT0,
   1710                gl::TEXTURE_2D,
   1711                tile.color_id,
   1712                0,
   1713            );
   1714            self.gl.framebuffer_texture_2d(
   1715                gl::DRAW_FRAMEBUFFER,
   1716                gl::DEPTH_ATTACHMENT,
   1717                gl::TEXTURE_2D,
   1718                self.depth_id.expect("depth texture should be initialized"),
   1719                0,
   1720            );
   1721            self.gl.bind_framebuffer(gl::DRAW_FRAMEBUFFER, prev_fbo[0] as gl::GLuint);
   1722 
   1723            surface.tiles.push(tile);
   1724        }
   1725    }
   1726 
   1727    fn destroy_tile(&mut self, device: &mut Device, id: NativeTileId) {
   1728        if let Some(surface) = self.surfaces.get_mut(&id.surface_id) {
   1729            if let Some(idx) = surface.tiles.iter().position(|t| t.x == id.x && t.y == id.y) {
   1730                let tile = surface.tiles.remove(idx);
   1731                self.deinit_tile(&tile);
   1732            }
   1733        }
   1734        if self.use_native_compositor {
   1735            self.compositor.destroy_tile(device, id);
   1736        }
   1737    }
   1738 
   1739    fn attach_external_image(&mut self, device: &mut Device, id: NativeSurfaceId, external_image: ExternalImageId) {
   1740        if self.use_native_compositor {
   1741            self.compositor.attach_external_image(device, id, external_image);
   1742        }
   1743        if let Some(surface) = self.surfaces.get_mut(&id) {
   1744            // Surfaces with attached external images have a single tile at the origin encompassing
   1745            // the entire surface.
   1746            assert!(surface.tile_size.is_empty());
   1747            surface.external_image = Some(external_image);
   1748            if surface.tiles.is_empty() {
   1749                surface.tiles.push(SwTile::new(0, 0));
   1750            }
   1751        }
   1752    }
   1753 
   1754    fn invalidate_tile(&mut self, device: &mut Device, id: NativeTileId, valid_rect: DeviceIntRect) {
   1755        if self.use_native_compositor {
   1756            self.compositor.invalidate_tile(device, id, valid_rect);
   1757        }
   1758        if let Some(surface) = self.surfaces.get_mut(&id.surface_id) {
   1759            if let Some(tile) = surface.tiles.iter_mut().find(|t| t.x == id.x && t.y == id.y) {
   1760                tile.invalid.set(true);
   1761                tile.valid_rect = valid_rect;
   1762            }
   1763        }
   1764    }
   1765 
   /// Bind a tile for software rendering: (re)allocate its color buffer and
   /// the shared depth buffer to fit the valid rect, and return the FBO plus
   /// the origin offset WR should render with. With a native compositor the
   /// tile is mapped so SWGL renders directly into the native buffer.
   fn bind(&mut self, device: &mut Device, id: NativeTileId, dirty_rect: DeviceIntRect, valid_rect: DeviceIntRect) -> NativeSurfaceInfo {
       let mut surface_info = NativeSurfaceInfo {
           origin: DeviceIntPoint::zero(),
           fbo_id: 0,
       };

       // Remember which tile is bound so unbind() can find it again.
       self.cur_tile = id;

       if let Some(surface) = self.surfaces.get_mut(&id.surface_id) {
           if let Some(tile) = surface.tiles.iter_mut().find(|t| t.x == id.x && t.y == id.y) {
               // invalidate_tile must have recorded the same valid rect.
               assert_eq!(tile.valid_rect, valid_rect);
               if valid_rect.is_empty() {
                   return surface_info;
               }

               // When mapping a native tile succeeds, SWGL renders straight
               // into the mapped buffer via the stride/pointer below.
               let mut stride = 0;
               let mut buf = ptr::null_mut();
               if self.use_native_compositor {
                   if let Some(tile_info) = self.compositor.map_tile(device, id, dirty_rect, valid_rect) {
                       stride = tile_info.stride;
                       buf = tile_info.data;
                   }
               } else if let Some(ref composite_thread) = self.composite_thread {
                   // Check if the tile is currently in use before proceeding to modify it.
                   if tile.graph_node.get().has_job.load(Ordering::SeqCst) {
                       // Need to wait for the SwComposite thread to finish any queued jobs.
                       composite_thread.wait_for_composites(false);
                   }
               }
               self.gl.set_texture_buffer(
                   tile.color_id,
                   gl::RGBA8,
                   valid_rect.width(),
                   valid_rect.height(),
                   stride,
                   buf,
                   surface.tile_size.width,
                   surface.tile_size.height,
               );
               // Reallocate the shared depth buffer to fit the valid rect, but within
               // a buffer sized to actually fit at least the maximum possible tile size.
               // The maximum tile size is supplied to avoid reallocation by ensuring the
               // allocated buffer is actually big enough to accommodate the largest tile
               // size requested by any used surface, even though supplied valid rect may
               // actually be much smaller than this. This will only force a texture
               // reallocation inside SWGL if the maximum tile size has grown since the
               // last time it was supplied, instead simply reusing the buffer if the max
               // tile size is not bigger than what was previously allocated.
               self.gl.set_texture_buffer(
                   self.depth_id.expect("depth texture should be initialized"),
                   gl::DEPTH_COMPONENT,
                   valid_rect.width(),
                   valid_rect.height(),
                   0,
                   ptr::null_mut(),
                   self.max_tile_size.width,
                   self.max_tile_size.height,
               );
               surface_info.fbo_id = tile.fbo_id;
               // Shift the rendering origin so the valid rect lands at the
               // start of the allocated buffer.
               surface_info.origin -= valid_rect.min.to_vector();
           }
       }

       surface_info
   }
   1831 
   1832    fn unbind(&mut self, device: &mut Device) {
   1833        let id = self.cur_tile;
   1834        if let Some(surface) = self.surfaces.get(&id.surface_id) {
   1835            if let Some(tile) = surface.tiles.iter().find(|t| t.x == id.x && t.y == id.y) {
   1836                if tile.valid_rect.is_empty() {
   1837                    // If we didn't actually render anything, then just queue any
   1838                    // dependencies.
   1839                    self.flush_composites(&id, surface, tile);
   1840                    return;
   1841                }
   1842 
   1843                // Force any delayed clears to be resolved.
   1844                self.gl.resolve_framebuffer(tile.fbo_id);
   1845 
   1846                if self.use_native_compositor {
   1847                    self.compositor.unmap_tile(device);
   1848                } else {
   1849                    // If we're not relying on a native compositor, then composite
   1850                    // any tiles that are dependent on this tile being updated but
   1851                    // are otherwise ready to composite.
   1852                    self.flush_composites(&id, surface, tile);
   1853                }
   1854            }
   1855        }
   1856    }
   1857 
   1858    fn begin_frame(&mut self, device: &mut Device) {
   1859        self.reset_overlaps();
   1860 
   1861        if self.use_native_compositor {
   1862            self.compositor.begin_frame(device);
   1863        }
   1864    }
   1865 
   1866    fn add_surface(
   1867        &mut self,
   1868        device: &mut Device,
   1869        id: NativeSurfaceId,
   1870        transform: CompositorSurfaceTransform,
   1871        clip_rect: DeviceIntRect,
   1872        filter: ImageRendering,
   1873        rounded_clip_rect: DeviceIntRect,
   1874        rounded_clip_radii: ClipRadius,
   1875    ) {
   1876        // Update the rounded clip on the surface
   1877        let surface = self.surfaces.get_mut(&id).expect("bug: unknown surface");
   1878        surface.rounded_clip = RoundedClip {
   1879            rect: rounded_clip_rect,
   1880            radii: rounded_clip_radii,
   1881        };
   1882 
   1883        if self.use_native_compositor {
   1884            self.compositor.add_surface(
   1885                device,
   1886                id,
   1887                transform,
   1888                clip_rect,
   1889                filter,
   1890                rounded_clip_rect,
   1891                rounded_clip_radii,
   1892            );
   1893        }
   1894 
   1895        if self.composite_thread.is_some() {
   1896            // If the surface has an attached external image, try to lock that now.
   1897            self.try_lock_composite_surface(device, &id);
   1898 
   1899            // If we're already busy compositing, then add to the queue of late
   1900            // surfaces instead of trying to sort into the main frame queue.
   1901            // These late surfaces will not have any overlap tracking done for
   1902            // them and must be processed synchronously at the end of the frame.
   1903            if self.is_compositing {
   1904                self.late_surfaces.push((id, transform, clip_rect, filter));
   1905                return;
   1906            }
   1907        }
   1908 
   1909        self.frame_surfaces.push((id, transform, clip_rect, filter));
   1910    }
   1911 
   /// Now that all the dependency graph nodes have been built, start queuing
   /// composition jobs. Any surfaces that get added after this point in the
   /// frame will not have overlap dependencies assigned and so must instead
   /// be added to the late_surfaces queue to be processed at the end of the
   /// frame.
   ///
   /// This performs, in order: opaque-rect computation for the native
   /// compositor, dirty-rect clipping, occlusion culling, overlap/dependency
   /// initialization, and finally queuing of all unblocked composite jobs.
   fn start_compositing(&mut self, device: &mut Device, clear_color: ColorF, dirty_rects: &[DeviceIntRect], _opaque_rects: &[DeviceIntRect]) {
       self.is_compositing = true;

       // Opaque rects are currently only computed here, not by WR itself, so we
       // ignore the passed parameter and forward our own version onto the native
       // compositor.
       let mut opaque_rects: Vec<DeviceIntRect> = Vec::new();
       for &(ref id, ref transform, ref clip_rect, _filter) in &self.frame_surfaces {
           if let Some(surface) = self.surfaces.get(id) {
               if !surface.is_opaque {
                   continue;
               }

               // Every tile of an opaque surface contributes its overlap rect.
               for tile in &surface.tiles {
                   if let Some(rect) = tile.overlap_rect(surface, transform, clip_rect) {
                       opaque_rects.push(rect);
                   }
               }
           }
       }

       self.compositor.start_compositing(device, clear_color, dirty_rects, &opaque_rects);

       // Union all dirty rects into one conservative bound, skipping the
       // clipping step entirely when the union is empty.
       if let Some(dirty_rect) = dirty_rects
           .iter()
           .fold(DeviceIntRect::zero(), |acc, dirty_rect| acc.union(dirty_rect))
           .to_non_empty()
       {
           // Factor dirty rect into surface clip rects
           for &mut (_, _, ref mut clip_rect, _) in &mut self.frame_surfaces {
               *clip_rect = clip_rect.intersection(&dirty_rect).unwrap_or_default();
           }
       }

       // Shrink clip rects of surfaces hidden behind opaque occluders.
       self.occlude_surfaces();

       // Discard surfaces that are entirely clipped out
       self.frame_surfaces
           .retain(|&(_, _, ref clip_rect, _)| !clip_rect.is_empty());

       if let Some(ref composite_thread) = self.composite_thread {
           // Compute overlap dependencies for surfaces.
           for &(ref id, ref transform, ref clip_rect, _filter) in &self.frame_surfaces {
               if let Some(surface) = self.surfaces.get(id) {
                   for tile in &surface.tiles {
                       self.init_overlaps(id, surface, tile, transform, clip_rect);
                   }
               }
           }

           // Lock framebuffer 0 so composite jobs can write into it.
           self.locked_framebuffer = self.gl.lock_framebuffer(0);

           composite_thread.prepare_for_composites();

           // Issue any initial composite jobs for the SwComposite thread.
           let mut lock = composite_thread.lock();
           for &(ref id, ref transform, ref clip_rect, filter) in &self.frame_surfaces {
               if let Some(surface) = self.surfaces.get(id) {
                   for tile in &surface.tiles {
                       if tile.overlaps.get() == 0 {
                           // Not dependent on any tiles, so go ahead and composite now.
                           self.queue_composite(surface, transform, clip_rect, filter, tile, &mut lock);
                       }
                   }
               }
           }
       }
   }
   1985 
   fn end_frame(&mut self, device: &mut Device,) {
       // Frame compositing is wrapping up; clear the in-progress flag first so
       // state is consistent even if the paths below early-out.
       self.is_compositing = false;

       if self.use_native_compositor {
           // Native compositor path: just forward the end-of-frame notification.
           self.compositor.end_frame(device);
       } else if let Some(ref composite_thread) = self.composite_thread {
           // Need to wait for the SwComposite thread to finish any queued jobs.
           composite_thread.wait_for_composites(false);

           if !self.late_surfaces.is_empty() {
               // All of the main frame surface have been processed by now. But if there
               // are any late surfaces, we need to kick off a new synchronous composite
               // phase. These late surfaces don't have any overlap/dependency tracking,
               // so we just queue them directly and wait synchronously for the composite
               // thread to process them in order.
               composite_thread.prepare_for_composites();
               {
                   // Scope the queue lock so it is dropped before we wait on the
                   // composite thread below.
                   let mut lock = composite_thread.lock();
                   for &(ref id, ref transform, ref clip_rect, filter) in &self.late_surfaces {
                       if let Some(surface) = self.surfaces.get(id) {
                           for tile in &surface.tiles {
                               self.queue_composite(surface, transform, clip_rect, filter, tile, &mut lock);
                           }
                       }
                   }
               }
               composite_thread.wait_for_composites(true);
           }

           // Drop the framebuffer lock acquired when compositing for this frame
           // was set up.
           self.locked_framebuffer = None;

           self.unlock_composite_surfaces(device);
       }

       // Reset per-frame surface lists and overlap tracking for the next frame,
       // regardless of which compositing path was taken.
       self.frame_surfaces.clear();
       self.late_surfaces.clear();

       self.reset_overlaps();
   }
   2025 
   2026    fn enable_native_compositor(&mut self, device: &mut Device, enable: bool) {
   2027        // TODO: The SwComposite thread is not properly instantiated if this is
   2028        // ever actually toggled.
   2029        assert_eq!(self.use_native_compositor, enable);
   2030        self.compositor.enable_native_compositor(device, enable);
   2031        self.use_native_compositor = enable;
   2032    }
   2033 
   2034    fn get_capabilities(&self, device: &mut Device) -> CompositorCapabilities {
   2035        self.compositor.get_capabilities(device)
   2036    }
   2037 
   2038    fn get_window_visibility(&self, device: &mut Device) -> WindowVisibility {
   2039        self.compositor.get_window_visibility(device)
   2040    }
   2041 }