tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git

core.rs (28004B)


/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

#![allow(unsafe_code)]

use crate::applicable_declarations::CascadePriority;
use crate::shared_lock::StylesheetGuards;
use crate::stylesheets::layer_rule::LayerOrder;
use malloc_size_of::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps};
use parking_lot::RwLock;
use smallvec::SmallVec;
use std::fmt;
use std::hash;
use std::io::Write;
use std::mem;
use std::ptr;
use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};

use super::map::{Entry, Map};
use super::unsafe_box::UnsafeBox;
use super::{CascadeLevel, StyleSource};

/// The rule tree, the structure servo uses to preserve the results of selector
/// matching.
///
/// This is organized as a tree of rules. When a node matches a set of rules,
/// they're inserted in order in the tree, starting with the least specific
/// one.
///
/// When a rule is inserted in the tree, other elements may share the path up
/// to a given rule. If that's the case, we don't duplicate child nodes, but
/// share them.
///
/// When the rule node refcount drops to zero, it doesn't get freed. It gets
/// instead put into a free list, and it is potentially GC'd after a while.
///
/// That way, a rule node that represents a likely-to-match-again rule (like a
/// :hover rule) can be reused if we haven't GC'd it yet.
#[derive(Debug)]
pub struct RuleTree {
    root: StrongRuleNode,
}
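
// The sketch below is not part of the original file; it illustrates the
// path-sharing behaviour the doc comment above describes. Callers walk down
// from the root and call `ensure_child` once per matched declaration, so two
// elements matching the same prefix of rules end up sharing the same chain
// of nodes. The function name and the pre-sorted iterator are illustrative
// assumptions.
#[allow(dead_code)]
fn insert_ordered_sketch(
    tree: &RuleTree,
    // Assumed to be sorted from least to most specific, as required above.
    decls: impl Iterator<Item = (StyleSource, CascadePriority)>,
) -> StrongRuleNode {
    let mut current = tree.root().clone();
    for (source, priority) in decls {
        // `ensure_child` returns the existing child when another element
        // already inserted this (source, priority) pair at this position.
        current = current.ensure_child(tree.root(), source, priority);
    }
    current
}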

impl Drop for RuleTree {
    fn drop(&mut self) {
        unsafe { self.swap_free_list_and_gc(ptr::null_mut()) }
    }
}

impl MallocSizeOf for RuleTree {
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        let mut n = 0;
        let mut stack = SmallVec::<[_; 32]>::new();
        stack.push(self.root.clone());

        while let Some(node) = stack.pop() {
            n += unsafe { ops.malloc_size_of(&*node.p) };
            let children = node.p.children.read();
            // Include the shallow size of the children map itself.
            n += children.shallow_size_of(ops);
            for c in &*children {
                stack.push(unsafe { c.upgrade() });
            }
        }

        n
    }
}

#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct ChildKey(CascadePriority, ptr::NonNull<()>);
unsafe impl Send for ChildKey {}
unsafe impl Sync for ChildKey {}

impl RuleTree {
    /// Construct a new rule tree.
    pub fn new() -> Self {
        RuleTree {
            root: StrongRuleNode::new(Box::new(RuleNode::root())),
        }
    }

    /// Get the root rule node.
    pub fn root(&self) -> &StrongRuleNode {
        &self.root
    }

    /// This can only be called when no other thread is accessing this tree.
    pub fn gc(&self) {
        unsafe { self.swap_free_list_and_gc(RuleNode::DANGLING_PTR) }
    }

    /// This can only be called when no other thread is accessing this tree.
    pub fn maybe_gc(&self) {
        #[cfg(debug_assertions)]
        self.maybe_dump_stats();

        if self.root.p.approximate_free_count.load(Ordering::Relaxed) > RULE_TREE_GC_INTERVAL {
            self.gc();
        }
    }

    #[cfg(debug_assertions)]
    fn maybe_dump_stats(&self) {
        use itertools::Itertools;
        use std::cell::Cell;
        use std::time::{Duration, Instant};

        if !log_enabled!(log::Level::Trace) {
            return;
        }

        const RULE_TREE_STATS_INTERVAL: Duration = Duration::from_secs(2);

        thread_local! {
            pub static LAST_STATS: Cell<Instant> = Cell::new(Instant::now());
        }

        let should_dump = LAST_STATS.with(|s| {
            let now = Instant::now();
            if now.duration_since(s.get()) < RULE_TREE_STATS_INTERVAL {
                return false;
            }
            s.set(now);
            true
        });

        if !should_dump {
            return;
        }

        let mut children_count = rustc_hash::FxHashMap::default();

        let mut stack = SmallVec::<[_; 32]>::new();
        stack.push(self.root.clone());
        while let Some(node) = stack.pop() {
            let children = node.p.children.read();
            *children_count.entry(children.len()).or_insert(0) += 1;
            for c in &*children {
                stack.push(unsafe { c.upgrade() });
            }
        }

        trace!("Rule tree stats:");
        let counts = children_count.keys().sorted();
        for count in counts {
            trace!(" {} - {}", count, children_count[count]);
        }
    }

    /// Steals the free list and drops its contents.
    unsafe fn swap_free_list_and_gc(&self, ptr: *mut RuleNode) {
        let root = &self.root.p;

        debug_assert!(!root.next_free.load(Ordering::Relaxed).is_null());

        // Reset the approximate free count to zero, as we are going to steal
        // the free list.
        root.approximate_free_count.store(0, Ordering::Relaxed);

        // Steal the free list head. Memory loads on nodes while iterating it
        // must observe any prior changes that occurred, so this requires
        // acquire ordering, but there are no writes that need to be kept
        // before this swap so there is no need for release.
        let mut head = root.next_free.swap(ptr, Ordering::Acquire);

        while head != RuleNode::DANGLING_PTR {
            debug_assert!(!head.is_null());

            let mut node = UnsafeBox::from_raw(head);

            // The root node cannot go on the free list.
            debug_assert!(node.root.is_some());

            // The refcount of nodes on the free list never goes below 1.
            debug_assert!(node.refcount.load(Ordering::Relaxed) > 0);

            // No one else is currently writing to that field. Get the address
            // of the next node in the free list and replace it with null;
            // other threads will now consider that this node is not on the
            // free list.
            head = node.next_free.swap(ptr::null_mut(), Ordering::Relaxed);

            // This release write synchronises with the acquire fence in
            // `WeakRuleNode::upgrade`, making sure that if an `upgrade` call
            // observes this decrement of the refcount to 0, it will also
            // observe the `node.next_free` swap to null above.
            if node.refcount.fetch_sub(1, Ordering::Release) == 1 {
                // And given it observed the null swap above, it will need
                // `pretend_to_be_on_free_list` to finish its job, writing
                // `RuleNode::DANGLING_PTR` in `node.next_free`.
                RuleNode::pretend_to_be_on_free_list(&node);

                // Drop this node now that we just observed its refcount going
                // down to zero.
                RuleNode::drop_without_free_list(&mut node);
            }
        }
    }
}
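
// A standalone toy, not part of the original file, of the free-list pattern
// used above: pushers link nodes in with a Release compare-exchange, and the
// GC steals the whole list with a single Acquire swap before walking it. The
// toy type and test below are illustrative assumptions only.
#[cfg(test)]
mod free_list_sketch {
    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    struct ToyNode {
        value: u32,
        next: AtomicPtr<ToyNode>,
    }

    fn push(head: &AtomicPtr<ToyNode>, value: u32) {
        let node = Box::into_raw(Box::new(ToyNode {
            value,
            next: AtomicPtr::new(ptr::null_mut()),
        }));
        let mut current = head.load(Ordering::Relaxed);
        loop {
            // Point the new node at the current head, then try to make it
            // the new head. Release on success so a stealer that acquires
            // the head also observes this `next` write.
            unsafe { (*node).next.store(current, Ordering::Relaxed) };
            match head.compare_exchange_weak(current, node, Ordering::Release, Ordering::Relaxed) {
                Ok(_) => return,
                Err(actual) => current = actual,
            }
        }
    }

    fn steal_and_drain(head: &AtomicPtr<ToyNode>) -> Vec<u32> {
        // One Acquire swap detaches the entire list, mirroring
        // `swap_free_list_and_gc` above.
        let mut current = head.swap(ptr::null_mut(), Ordering::Acquire);
        let mut values = Vec::new();
        while !current.is_null() {
            let node = unsafe { Box::from_raw(current) };
            values.push(node.value);
            current = node.next.load(Ordering::Relaxed);
        }
        values
    }

    #[test]
    fn drains_in_lifo_order() {
        let head = AtomicPtr::new(ptr::null_mut());
        for v in 1..=3 {
            push(&head, v);
        }
        assert_eq!(steal_and_drain(&head), vec![3, 2, 1]);
    }
}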

/// The number of RuleNodes added to the free list before we will consider
/// doing a GC when calling maybe_gc().  (The value is copied from Gecko,
/// where it likely did not result from a rigorous performance analysis.)
const RULE_TREE_GC_INTERVAL: usize = 300;

/// A node in the rule tree.
struct RuleNode {
    /// The root node. Only the root has no root pointer, for obvious reasons.
    root: Option<WeakRuleNode>,

    /// The parent rule node. Only the root has no parent.
    parent: Option<StrongRuleNode>,

    /// The actual style source, either coming from a selector in a StyleRule,
    /// or a raw property declaration block (like the style attribute).
    ///
    /// None for the root node.
    source: Option<StyleSource>,

    /// The cascade level + layer order this rule is positioned at.
    cascade_priority: CascadePriority,

    /// The refcount of this node.
    ///
    /// Starts at one. Incremented in `StrongRuleNode::clone` and
    /// `WeakRuleNode::upgrade`. Decremented in `StrongRuleNode::drop`
    /// and `RuleTree::swap_free_list_and_gc`.
    ///
    /// If a non-root node's refcount reaches zero, it is incremented back to at
    /// least one in `RuleNode::pretend_to_be_on_free_list` until the caller who
    /// observed it dropping to zero had a chance to try to remove it from its
    /// parent's children list.
    ///
    /// The refcount should never be decremented to zero if the value in
    /// `next_free` is not null.
    refcount: AtomicUsize,

    /// Only used for the root, stores the number of free rule nodes that are
    /// around.
    approximate_free_count: AtomicUsize,

    /// The children of a given rule node. Children remove themselves from here
    /// when they go away.
    children: RwLock<Map<ChildKey, WeakRuleNode>>,

    /// This field has two different meanings depending on whether this is the
    /// root node or not.
    ///
    /// If it is the root, it represents the head of the free list. It may be
    /// null, which means the free list is gone because the tree was dropped,
    /// and it may be `RuleNode::DANGLING_PTR`, which means the free list is
    /// empty.
    ///
    /// If it is not the root node, this field is either null if the node is
    /// not on the free list, `RuleNode::DANGLING_PTR` if it is the last item
    /// on the free list or the node is pretending to be on the free list, or
    /// any valid non-null pointer representing the next item on the free list
    /// after this one.
    ///
    /// See `RuleNode::push_on_free_list`, `swap_free_list_and_gc`, and
    /// `WeakRuleNode::upgrade`.
    ///
    /// Two threads should never attempt to put the same node on the free list
    /// both at the same time.
    next_free: AtomicPtr<RuleNode>,
}

// On Gecko builds, hook into the leak checking machinery.
#[cfg(feature = "gecko_refcount_logging")]
mod gecko_leak_checking {
    use super::RuleNode;
    use std::mem::size_of;
    use std::os::raw::{c_char, c_void};

    extern "C" {
        fn NS_LogCtor(aPtr: *mut c_void, aTypeName: *const c_char, aSize: u32);
        fn NS_LogDtor(aPtr: *mut c_void, aTypeName: *const c_char, aSize: u32);
    }
    static NAME: &'static [u8] = b"RuleNode\0";

    /// Logs the creation of a heap-allocated object to Gecko's leak-checking machinery.
    pub(super) fn log_ctor(ptr: *const RuleNode) {
        let s = NAME as *const [u8] as *const u8 as *const c_char;
        unsafe {
            NS_LogCtor(ptr as *mut c_void, s, size_of::<RuleNode>() as u32);
        }
    }

    /// Logs the destruction of a heap-allocated object to Gecko's leak-checking machinery.
    pub(super) fn log_dtor(ptr: *const RuleNode) {
        let s = NAME as *const [u8] as *const u8 as *const c_char;
        unsafe {
            NS_LogDtor(ptr as *mut c_void, s, size_of::<RuleNode>() as u32);
        }
    }
}

#[inline(always)]
fn log_new(_ptr: *const RuleNode) {
    #[cfg(feature = "gecko_refcount_logging")]
    gecko_leak_checking::log_ctor(_ptr);
}

#[inline(always)]
fn log_drop(_ptr: *const RuleNode) {
    #[cfg(feature = "gecko_refcount_logging")]
    gecko_leak_checking::log_dtor(_ptr);
}

impl RuleNode {
    const DANGLING_PTR: *mut Self = ptr::NonNull::dangling().as_ptr();

    unsafe fn new(
        root: WeakRuleNode,
        parent: StrongRuleNode,
        source: StyleSource,
        cascade_priority: CascadePriority,
    ) -> Self {
        debug_assert!(root.p.parent.is_none());
        RuleNode {
            root: Some(root),
            parent: Some(parent),
            source: Some(source),
            cascade_priority,
            refcount: AtomicUsize::new(1),
            children: Default::default(),
            approximate_free_count: AtomicUsize::new(0),
            next_free: AtomicPtr::new(ptr::null_mut()),
        }
    }

    fn root() -> Self {
        RuleNode {
            root: None,
            parent: None,
            source: None,
            cascade_priority: CascadePriority::new(CascadeLevel::UANormal, LayerOrder::root()),
            refcount: AtomicUsize::new(1),
            approximate_free_count: AtomicUsize::new(0),
            children: Default::default(),
            next_free: AtomicPtr::new(RuleNode::DANGLING_PTR),
        }
    }

    fn key(&self) -> ChildKey {
        ChildKey(
            self.cascade_priority,
            self.source
                .as_ref()
                .expect("Called key() on the root node")
                .key(),
        )
    }

    /// Drops a node without ever putting it on the free list.
    ///
    /// Note that the node may not be dropped if we observe that its refcount
    /// isn't zero anymore when we write-lock its parent's children map to
    /// remove it.
    ///
    /// This loops over parents of dropped nodes if their own refcount reaches
    /// zero to avoid recursion when dropping deep hierarchies of nodes.
    ///
    /// For non-root nodes, this should always be preceded by a call to
    /// `RuleNode::pretend_to_be_on_free_list`.
    unsafe fn drop_without_free_list(this: &mut UnsafeBox<Self>) {
        // We clone the box and shadow the original one to be able to loop
        // over its ancestors if they also need to be dropped.
        let mut this = UnsafeBox::clone(this);
        loop {
            // If the node has a parent, we need to remove it from its parent's
            // children list.
            if let Some(parent) = this.parent.as_ref() {
                debug_assert!(!this.next_free.load(Ordering::Relaxed).is_null());

                // We lock the parent's children list, which means no other
                // thread will have any more opportunity to resurrect the node
                // anymore.
                let mut children = parent.p.children.write();

                this.next_free.store(ptr::null_mut(), Ordering::Relaxed);

                // We decrement the counter to remove the "pretend to be
                // on the free list" reference.
                let old_refcount = this.refcount.fetch_sub(1, Ordering::Release);
                debug_assert!(old_refcount != 0);
                if old_refcount != 1 {
                    // Other threads resurrected this node and those references
                    // are still alive; we have nothing to do anymore.
                    return;
                }

                // We finally remove the node from its parent's children list;
                // there are now no other references to it and it cannot be
                // resurrected anymore, even after we unlock the list.
                debug!(
                    "Remove from child list: {:?}, parent: {:?}",
                    this.as_mut_ptr(),
                    this.parent.as_ref().map(|p| p.p.as_mut_ptr())
                );
                let weak = children.remove(&this.key(), |node| node.p.key()).unwrap();
                assert_eq!(weak.p.as_mut_ptr(), this.as_mut_ptr());
            } else {
                debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
                debug_assert_eq!(this.refcount.load(Ordering::Relaxed), 0);
            }

            // We are going to drop this node for good this time; as per the
            // usual refcounting protocol, we need an acquire fence here before
            // we run the destructor.
            //
            // See https://github.com/rust-lang/rust/pull/41714#issuecomment-298996916
            // for why it doesn't matter whether this is a load or a fence.
            atomic::fence(Ordering::Acquire);

            // Remove the parent reference from the child to avoid
            // recursively dropping it and putting it on the free list.
            let parent = UnsafeBox::deref_mut(&mut this).parent.take();

            // We now drop the actual box and its contents; no one should
            // access the current value in `this` anymore.
            log_drop(&*this);
            UnsafeBox::drop(&mut this);

            if let Some(parent) = parent {
                // We will attempt to drop the node's parent without the free
                // list, so we clone the inner unsafe box and forget the
                // original parent to avoid running its `StrongRuleNode`
                // destructor, which would attempt to use the free list if it
                // still exists.
                this = UnsafeBox::clone(&parent.p);
                mem::forget(parent);
                if this.refcount.fetch_sub(1, Ordering::Release) == 1 {
                    debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
                    if this.root.is_some() {
                        RuleNode::pretend_to_be_on_free_list(&this);
                    }
                    // The parent also reached refcount zero; we loop to drop it.
                    continue;
                }
            }

            return;
        }
    }

    /// Pushes this node on the tree's free list. Returns false if the free
    /// list is gone. Should only be called after we decremented a node's
    /// refcount to zero and pretended to be on the free list.
    unsafe fn push_on_free_list(this: &UnsafeBox<Self>) -> bool {
        let root = &this.root.as_ref().unwrap().p;

        debug_assert!(this.refcount.load(Ordering::Relaxed) > 0);
        debug_assert_eq!(this.next_free.load(Ordering::Relaxed), Self::DANGLING_PTR);

        // Increment the approximate free count by one.
        root.approximate_free_count.fetch_add(1, Ordering::Relaxed);

        // If the compare-exchange operation fails in the loop, we will retry
        // with the new head value, so this can be a relaxed load.
        let mut head = root.next_free.load(Ordering::Relaxed);

        while !head.is_null() {
            // Two threads can never attempt to push the same node on the free
            // list both at the same time, so whoever else pushed a node on the
            // free list cannot have done so with this node.
            debug_assert_ne!(head, this.as_mut_ptr());

            // Store the current head of the free list in this node.
            this.next_free.store(head, Ordering::Relaxed);

            // Any thread acquiring the free list must observe the previous
            // next_free changes that occurred, hence the release ordering
            // on success.
            match root.next_free.compare_exchange_weak(
                head,
                this.as_mut_ptr(),
                Ordering::Release,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    // This node is now on the free list, caller should not use
                    // the node anymore.
                    return true;
                },
                Err(new_head) => head = new_head,
            }
        }

        // Tree was dropped and free list has been destroyed. We did not push
        // this node on the free list but we still pretend to be on the free
        // list to be ready to call `drop_without_free_list`.
        false
    }

    /// Makes the node pretend to be on the free list. This will increment the
    /// refcount by 1 and store `Self::DANGLING_PTR` in `next_free`. This
    /// method should only be called after the caller decremented the refcount
    /// to zero, with the null pointer stored in `next_free`.
    unsafe fn pretend_to_be_on_free_list(this: &UnsafeBox<Self>) {
        debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
        this.refcount.fetch_add(1, Ordering::Relaxed);
        this.next_free.store(Self::DANGLING_PTR, Ordering::Release);
    }

    fn as_mut_ptr(&self) -> *mut RuleNode {
        self as *const RuleNode as *mut RuleNode
    }
}

pub(crate) struct WeakRuleNode {
    p: UnsafeBox<RuleNode>,
}

/// A strong reference to a rule node.
pub struct StrongRuleNode {
    p: UnsafeBox<RuleNode>,
}

#[cfg(feature = "servo")]
malloc_size_of::malloc_size_of_is_0!(StrongRuleNode);

impl StrongRuleNode {
    fn new(n: Box<RuleNode>) -> Self {
        debug_assert_eq!(n.parent.is_none(), n.source.is_none());

        log_new(&*n);

        debug!("Creating rule node: {:p}", &*n);

        Self {
            p: UnsafeBox::from_box(n),
        }
    }

    unsafe fn from_unsafe_box(p: UnsafeBox<RuleNode>) -> Self {
        Self { p }
    }

    unsafe fn downgrade(&self) -> WeakRuleNode {
        WeakRuleNode {
            p: UnsafeBox::clone(&self.p),
        }
    }

    /// Get the parent rule node of this rule node.
    pub fn parent(&self) -> Option<&StrongRuleNode> {
        self.p.parent.as_ref()
    }

    pub(super) fn ensure_child(
        &self,
        root: &StrongRuleNode,
        source: StyleSource,
        cascade_priority: CascadePriority,
    ) -> StrongRuleNode {
        use parking_lot::RwLockUpgradableReadGuard;

        debug_assert!(
            self.p.cascade_priority <= cascade_priority,
            "Should be ordered (instead {:?} > {:?}), from {:?} and {:?}",
            self.p.cascade_priority,
            cascade_priority,
            self.p.source,
            source,
        );

        let key = ChildKey(cascade_priority, source.key());
        let children = self.p.children.upgradable_read();
        if let Some(child) = children.get(&key, |node| node.p.key()) {
            // Sound to call because we read-locked the parent's children.
            return unsafe { child.upgrade() };
        }
        let mut children = RwLockUpgradableReadGuard::upgrade(children);
        match children.entry(key, |node| node.p.key()) {
            Entry::Occupied(child) => {
                // Sound to call because we write-locked the parent's children.
                unsafe { child.upgrade() }
            },
            Entry::Vacant(entry) => unsafe {
                let node = StrongRuleNode::new(Box::new(RuleNode::new(
                    root.downgrade(),
                    self.clone(),
                    source,
                    cascade_priority,
                )));
                // Sound to call because we still own a strong reference to
                // this node, through the `node` variable itself that we are
                // going to return to the caller.
                entry.insert(node.downgrade());
                node
            },
        }
    }

    /// Get the style source corresponding to this rule node. May return `None`
    /// if it's the root node, which means that the node hasn't matched any
    /// rules.
    pub fn style_source(&self) -> Option<&StyleSource> {
        self.p.source.as_ref()
    }

    /// The cascade priority.
    #[inline]
    pub fn cascade_priority(&self) -> CascadePriority {
        self.p.cascade_priority
    }

    /// The cascade level.
    #[inline]
    pub fn cascade_level(&self) -> CascadeLevel {
        self.cascade_priority().cascade_level()
    }

    /// The importance.
    #[inline]
    pub fn importance(&self) -> crate::properties::Importance {
        self.cascade_level().importance()
    }

    /// Returns whether this node has any children; only intended for testing
    /// purposes.
    pub unsafe fn has_children_for_testing(&self) -> bool {
        !self.p.children.read().is_empty()
    }

    pub(super) fn dump<W: Write>(&self, guards: &StylesheetGuards, writer: &mut W, indent: usize) {
        const INDENT_INCREMENT: usize = 4;

        for _ in 0..indent {
            let _ = write!(writer, " ");
        }

        let _ = writeln!(
            writer,
            " - {:p} (ref: {:?}, parent: {:?})",
            &*self.p,
            self.p.refcount.load(Ordering::Relaxed),
            self.parent().map(|p| &*p.p as *const RuleNode)
        );

        for _ in 0..indent {
            let _ = write!(writer, " ");
        }

        if let Some(source) = self.style_source() {
            source.dump(self.cascade_level().guard(guards), writer);
        } else {
            if indent != 0 {
                warn!("How has this happened?");
            }
            let _ = write!(writer, "(root)");
        }

        let _ = writeln!(writer);
        for child in &*self.p.children.read() {
            unsafe {
                child
                    .upgrade()
                    .dump(guards, writer, indent + INDENT_INCREMENT);
            }
        }
    }
}
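
// A standalone toy, not part of the original file, of the double-checked
// locking idiom `ensure_child` relies on: look the key up under parking_lot's
// upgradable read lock, and only if it is absent upgrade to a write lock and
// go through the entry API, so two racing inserters agree on a single value.
// The plain HashMap and helper below are illustrative assumptions, not the
// rule tree's own `Map`.
#[cfg(test)]
mod upgradable_read_sketch {
    use parking_lot::{RwLock, RwLockUpgradableReadGuard};
    use std::collections::HashMap;

    fn get_or_insert_with(
        map: &RwLock<HashMap<u32, String>>,
        key: u32,
        make: impl FnOnce() -> String,
    ) -> String {
        // Fast path: plain readers may proceed while we hold this guard.
        let read = map.upgradable_read();
        if let Some(value) = read.get(&key) {
            return value.clone();
        }
        // Slow path: upgrade to a write lock; the entry API re-checks, so an
        // entry that appeared in the meantime is reused rather than replaced.
        let mut write = RwLockUpgradableReadGuard::upgrade(read);
        write.entry(key).or_insert_with(make).clone()
    }

    #[test]
    fn second_caller_shares_the_first_value() {
        let map = RwLock::new(HashMap::new());
        assert_eq!(get_or_insert_with(&map, 1, || "first".into()), "first");
        // A later caller gets the existing entry back.
        assert_eq!(get_or_insert_with(&map, 1, || "second".into()), "first");
    }
}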

impl Clone for StrongRuleNode {
    fn clone(&self) -> Self {
        debug!(
            "{:p}: {:?}+",
            &*self.p,
            self.p.refcount.load(Ordering::Relaxed)
        );
        debug_assert!(self.p.refcount.load(Ordering::Relaxed) > 0);
        self.p.refcount.fetch_add(1, Ordering::Relaxed);
        unsafe { StrongRuleNode::from_unsafe_box(UnsafeBox::clone(&self.p)) }
    }
}

impl Drop for StrongRuleNode {
    #[cfg_attr(feature = "servo", allow(unused_mut))]
    fn drop(&mut self) {
        let node = &*self.p;
        debug!("{:p}: {:?}-", node, node.refcount.load(Ordering::Relaxed));
        debug!(
            "Dropping node: {:p}, root: {:?}, parent: {:?}",
            node,
            node.root.as_ref().map(|r| &*r.p as *const RuleNode),
            node.parent.as_ref().map(|p| &*p.p as *const RuleNode)
        );

        let should_drop = {
            debug_assert!(node.refcount.load(Ordering::Relaxed) > 0);
            node.refcount.fetch_sub(1, Ordering::Release) == 1
        };

        if !should_drop {
            // The refcount hasn't even dropped to zero yet; there is nothing
            // for us to do anymore.
            return;
        }

        unsafe {
            if node.root.is_some() {
                // This is a non-root node and we just observed the refcount
                // dropping to zero. We need to pretend to be on the free list
                // to unblock any thread that tried to resurrect this node
                // first through `WeakRuleNode::upgrade`.
                RuleNode::pretend_to_be_on_free_list(&self.p);

                // Attempt to push the node on the free list. This may fail
                // if the free list is gone.
                if RuleNode::push_on_free_list(&self.p) {
                    return;
                }
            }

            // Either this was the last reference of the root node, or the
            // rule tree is gone and there is no free list anymore. Drop the
            // node.
            RuleNode::drop_without_free_list(&mut self.p);
        }
    }
}
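
// A standalone toy, not part of the original file, of the classic refcounting
// protocol the `Clone` and `Drop` impls above follow: Relaxed increments,
// Release decrements, and an Acquire fence before tearing the allocation
// down, so every use of the value happens-before its destruction. This
// mirrors the reasoning in `std::sync::Arc` and is illustrative only.
#[cfg(test)]
mod refcount_protocol_sketch {
    use std::sync::atomic::{self, AtomicUsize, Ordering};

    struct Inner<T> {
        refcount: AtomicUsize,
        value: T,
    }

    struct MiniRc<T> {
        inner: *const Inner<T>,
    }

    impl<T> MiniRc<T> {
        fn new(value: T) -> Self {
            let inner = Box::into_raw(Box::new(Inner {
                refcount: AtomicUsize::new(1),
                value,
            }));
            MiniRc { inner }
        }
    }

    impl<T> Clone for MiniRc<T> {
        fn clone(&self) -> Self {
            // A new reference can only be created from an existing one, so
            // a Relaxed increment suffices; no extra ordering is required.
            unsafe { (*self.inner).refcount.fetch_add(1, Ordering::Relaxed) };
            MiniRc { inner: self.inner }
        }
    }

    impl<T> Drop for MiniRc<T> {
        fn drop(&mut self) {
            unsafe {
                // Release, so our uses of `value` happen-before the
                // destruction run by whoever sees the count hit zero.
                if (*self.inner).refcount.fetch_sub(1, Ordering::Release) == 1 {
                    // Acquire, to observe every other reference's uses
                    // before running the destructor, as in
                    // `drop_without_free_list` above.
                    atomic::fence(Ordering::Acquire);
                    drop(Box::from_raw(self.inner as *mut Inner<T>));
                }
            }
        }
    }

    #[test]
    fn last_reference_frees_the_value() {
        let a = MiniRc::new(String::from("node"));
        let b = a.clone();
        drop(a);
        // `b` still keeps the allocation alive after `a` is gone.
        unsafe { assert_eq!((*b.inner).value, "node") };
        drop(b);
    }
}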

impl WeakRuleNode {
    /// Upgrades this weak node reference, returning a strong one.
    ///
    /// Must only be called on weak references stored in a node's children
    /// list, and that children list must at least be read-locked when this
    /// is called.
    unsafe fn upgrade(&self) -> StrongRuleNode {
        debug!("Upgrading weak node: {:p}", &*self.p);

        if self.p.refcount.fetch_add(1, Ordering::Relaxed) == 0 {
            // We observed a refcount of 0, so we need to wait for this node
            // to be put on the free list. Resetting the `next_free` pointer
            // to null is only done in `RuleNode::drop_without_free_list`,
            // just before a release refcount decrement, so this acquire
            // fence here makes sure that we observed the write to null
            // before we loop until there is a non-null value.
            atomic::fence(Ordering::Acquire);
            while self.p.next_free.load(Ordering::Relaxed).is_null() {}
        }
        StrongRuleNode::from_unsafe_box(UnsafeBox::clone(&self.p))
    }
}
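
// A standalone toy, not part of the original file, walking through the
// `next_free` handshake that `upgrade` waits on, with plain atomics standing
// in for a node's fields. The interleaving is written out sequentially so the
// test is deterministic; in the real tree the dropper and upgrader race on
// separate threads.
#[cfg(test)]
mod upgrade_handshake_sketch {
    use std::ptr;
    use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};

    #[test]
    fn upgrader_waits_for_the_pretend_publication() {
        let dangling: *mut u8 = ptr::NonNull::dangling().as_ptr();
        let refcount = AtomicUsize::new(1);
        let next_free = AtomicPtr::new(ptr::null_mut());

        // Dropper: the last strong reference goes away, count hits zero.
        assert_eq!(refcount.fetch_sub(1, Ordering::Release), 1);

        // Upgrader arrives now; its increment observes zero, so the node is
        // mid-drop and `next_free` is still null.
        assert_eq!(refcount.fetch_add(1, Ordering::Relaxed), 0);
        assert!(next_free.load(Ordering::Relaxed).is_null());

        // Dropper: pretend to be on the free list, i.e. take a reference
        // back and publish a non-null `next_free` with a Release store.
        refcount.fetch_add(1, Ordering::Relaxed);
        next_free.store(dangling, Ordering::Release);

        // Upgrader: after an Acquire fence, the spin on a non-null
        // `next_free` now terminates and the node counts as resurrected.
        atomic::fence(Ordering::Acquire);
        while next_free.load(Ordering::Relaxed).is_null() {}
        assert_eq!(refcount.load(Ordering::Relaxed), 2);
    }
}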

impl fmt::Debug for StrongRuleNode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        (&*self.p as *const RuleNode).fmt(f)
    }
}

impl Eq for StrongRuleNode {}
impl PartialEq for StrongRuleNode {
    fn eq(&self, other: &Self) -> bool {
        &*self.p as *const RuleNode == &*other.p
    }
}

impl hash::Hash for StrongRuleNode {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        (&*self.p as *const RuleNode).hash(state)
    }
}

// Large pages generate thousands of RuleNode objects.
size_of_test!(RuleNode, 80);
// StrongRuleNode should be pointer-sized even inside an option.
size_of_test!(Option<StrongRuleNode>, 8);