moz2d_renderer.rs (34447B)
1 /* This Source Code Form is subject to the terms of the Mozilla Public 2 * License, v. 2.0. If a copy of the MPL was not distributed with this 3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ 4 #![deny(missing_docs)] 5 6 //! Provides the webrender-side implementation of gecko blob images. 7 //! 8 //! Pretty much this is just a shim that calls back into Moz2DImageRenderer, but 9 //! it also handles merging "partial" blob images (see `merge_blob_images`) and 10 //! registering fonts found in the blob (see `prepare_request`). 11 12 use bindings::{wr_moz2d_render_cb, ArcVecU8, ByteSlice, MutByteSlice}; 13 use gecko_profiler::auto_profiler_marker; 14 use gecko_profiler::gecko_profiler_label; 15 use rayon::prelude::*; 16 use rayon::ThreadPool; 17 use webrender::api::units::{BlobDirtyRect, BlobToDeviceTranslation, DeviceIntRect}; 18 use webrender::api::*; 19 20 use euclid::point2; 21 use std::collections::btree_map::BTreeMap; 22 use std::collections::hash_map; 23 use std::collections::hash_map::HashMap; 24 use std::collections::Bound::Included; 25 use std::i32; 26 use std::mem; 27 use std::os::raw::c_void; 28 use std::ptr; 29 use std::sync::Arc; 30 31 #[cfg(any(target_os = "macos", target_os = "ios"))] 32 use core_foundation::string::CFString; 33 #[cfg(any(target_os = "macos", target_os = "ios"))] 34 use core_graphics::font::CGFont; 35 #[cfg(any(target_os = "macos", target_os = "ios"))] 36 use foreign_types::ForeignType; 37 38 #[cfg(target_os = "windows")] 39 use std::ffi::CStr; 40 #[cfg(target_os = "windows")] 41 use std::ffi::OsStr; 42 #[cfg(target_os = "windows")] 43 use std::iter::FromIterator; 44 #[cfg(target_os = "windows")] 45 use std::os::raw::c_char; 46 #[cfg(target_os = "windows")] 47 use std::path::PathBuf; 48 #[cfg(target_os = "windows")] 49 use winapi::um::errhandlingapi::GetLastError; 50 51 #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "windows")))] 52 use std::ffi::CString; 53 #[cfg(not(any(target_os = "macos", target_os 
= "ios", target_os = "windows")))]
use std::os::unix::ffi::OsStrExt;

/// Local print-debugging utility.
///
/// The active arm evaluates and discards its arguments (compiled out);
/// swap in the commented-out `println!` arm to enable logging.
macro_rules! dlog {
    ($($e:expr),*) => { {$(let _ = $e;)*} }
    //($($t:tt)*) => { println!($($t)*) }
}

/// Debug prints a blob's item bounds, indicating whether the bounds are dirty or not.
///
/// Entries strictly contained by `dirty_rect` are marked with a `*`.
fn dump_bounds(blob: &[u8], dirty_rect: DeviceIntRect) {
    let mut index = BlobReader::new(blob);
    while index.reader.has_more() {
        let e = index.read_entry();
        dlog!(
            " {:?} {}",
            e.bounds,
            if dirty_rect.contains_box(&e.bounds) { "*" } else { "" }
        );
    }
}

/// Debug prints a blob's metadata (the index entries stored at the end of the stream).
fn dump_index(blob: &[u8]) {
    let mut index = BlobReader::new(blob);
    // we might get an empty result here because sub groups are not tightly bound
    // and we'll sometimes have display items that end up with empty bounds in
    // the blob image.
    while index.reader.has_more() {
        let e = index.read_entry();
        dlog!("result bounds: {} {} {:?}", e.end, e.extra_end, e.bounds);
    }
}

/// Handles the interpretation and rasterization of gecko-based (moz2d) WR blob images.
pub struct Moz2dBlobImageHandler {
    // Rayon pool used for normal-priority rasterization work.
    workers: Arc<ThreadPool>,
    // Rayon pool used for low-priority rasterization work.
    workers_low_priority: Arc<ThreadPool>,
    // Per-image blob data and rasterization parameters, keyed by blob image key.
    blob_commands: HashMap<BlobImageKey, BlobCommand>,
    // When false, rasterization always happens serially on the calling thread.
    enable_multithreading: bool,
}

/// Transmute some bytes into a value.
///
/// FIXME: kill this with fire and/or do a super robust security audit
///
/// SAFETY: the caller must guarantee that `slice` holds at least
/// `size_of::<T>()` bytes (asserted below) whose contents are a valid bit
/// pattern for `T`. `T: Copy` plus the unaligned read avoid alignment and
/// double-drop hazards, but the bytes themselves are trusted as-is.
unsafe fn convert_from_bytes<T: Copy>(slice: &[u8]) -> T {
    assert!(mem::size_of::<T>() <= slice.len());
    ptr::read_unaligned(slice.as_ptr() as *const T)
}

/// Transmute a value into some bytes.
fn convert_to_bytes<T>(x: &T) -> &[u8] {
    // SAFETY: the returned slice borrows `x` for its whole lifetime, covers
    // exactly `size_of::<T>()` bytes, and `*const u8` has no alignment
    // requirement. Note this exposes any padding bytes of `T`; callers only
    // use it with POD types.
    unsafe {
        let ip: *const T = x;
        let bp: *const u8 = ip as *const _;
        ::std::slice::from_raw_parts(bp, mem::size_of::<T>())
    }
}

/// A simple helper for deserializing a bunch of transmuted POD data from bytes.
struct BufReader<'a> {
    /// The buffer to read from.
    buf: &'a [u8],
    /// Where we currently are reading from.
    pos: usize,
}

impl<'a> BufReader<'a> {
    /// Creates a reader over the given input.
    fn new(buf: &'a [u8]) -> BufReader<'a> {
        BufReader { buf, pos: 0 }
    }

    /// Transmute-deserializes a value of type T from the stream.
    ///
    /// !!! SUPER DANGEROUS !!!
    ///
    /// To limit the scope of this unsafety, please don't call this directly.
    /// Make a helper method for each whitelisted type.
    unsafe fn read<T: Copy>(&mut self) -> T {
        let ret = convert_from_bytes(&self.buf[self.pos..]);
        self.pos += mem::size_of::<T>();
        ret
    }

    /// Deserializes a BlobFont.
    fn read_blob_font(&mut self) -> BlobFont {
        unsafe { self.read::<BlobFont>() }
    }

    /// Deserializes a usize.
    fn read_usize(&mut self) -> usize {
        unsafe { self.read::<usize>() }
    }

    /// Deserializes a rectangle.
    fn read_box(&mut self) -> DeviceIntRect {
        unsafe { self.read::<DeviceIntRect>() }
    }

    /// Returns whether the buffer has more data to deserialize.
    fn has_more(&self) -> bool {
        self.pos < self.buf.len()
    }
}

/// Reads the metadata of a blob image.
///
/// Blob stream format:
/// { data[..], index[..], offset in the stream of the index array }
///
/// An 'item' has 'data' and 'extra_data'
///  - In our case the 'data' is the stream produced by DrawTargetRecording
///    and the 'extra_data' includes things like webrender font keys
///
/// The index is an array of entries of the following form:
/// { end, extra_end, bounds }
///
///  - end is the offset of the end of an item's data
///    an item's data goes from the beginning of the stream or
///    the beginning of the last item til end
///  - extra_end is the offset of the end of an item's extra data
///    an item's extra data goes from 'end' until 'extra_end'
///  - bounds is a set of 4 ints { min.x, min.y, max.x, max.y }
///
/// The offsets in the index should be monotonically increasing.
///
/// Design rationale:
///  - the index is smaller so we append it to the end of the data array
///    during construction. This makes it more likely that we'll fit inside
///    the data Vec
///  - we use indices/offsets instead of sizes to avoid having to deal with any
///    arithmetic that might overflow.
struct BlobReader<'a> {
    /// The buffer of the blob.
    reader: BufReader<'a>,
    /// Where the buffer head is.
    begin: usize,
}

/// The metadata for each display item in a blob image (doesn't match the serialized layout).
///
/// See BlobReader above for detailed docs of the blob image format.
struct Entry {
    /// The bounds of the display item.
    bounds: DeviceIntRect,
    /// Where the item's recorded drawing commands start.
    begin: usize,
    /// Where the item's recorded drawing commands end, and its extra data starts.
    end: usize,
    /// Where the item's extra data ends, and the next item's `begin`.
    extra_end: usize,
}

impl<'a> BlobReader<'a> {
    /// Creates a new BlobReader for the given buffer.
    fn new(buf: &'a [u8]) -> BlobReader<'a> {
        // The offset of the index is at the end of the buffer.
        let index_offset_pos = buf.len() - mem::size_of::<usize>();
        // In release builds the subtraction above wraps for undersized
        // buffers, which this assert then catches.
        assert!(index_offset_pos < buf.len());
        let index_offset = unsafe { convert_from_bytes::<usize>(&buf[index_offset_pos..]) };

        BlobReader {
            reader: BufReader::new(&buf[index_offset..index_offset_pos]),
            begin: 0,
        }
    }

    /// Reads the next display item's metadata.
    fn read_entry(&mut self) -> Entry {
        let end = self.reader.read_usize();
        let extra_end = self.reader.read_usize();
        let bounds = self.reader.read_box();
        let ret = Entry {
            begin: self.begin,
            end,
            extra_end,
            bounds,
        };
        // The next item's data starts where this item's extra data ended.
        self.begin = extra_end;
        ret
    }
}

/// Writes new blob images.
///
/// In our case this is the result of merging an old one and a new one
struct BlobWriter {
    /// The buffer that the data and extra data for the items is accumulated.
    data: Vec<u8>,
    /// The buffer that the metadata for the items is accumulated.
    index: Vec<u8>,
}

impl BlobWriter {
    /// Creates an empty BlobWriter.
    fn new() -> BlobWriter {
        BlobWriter {
            data: Vec::new(),
            index: Vec::new(),
        }
    }

    /// Writes a display item to the blob.
    ///
    /// `data` contains both the item's recording and its `extra_size` trailing
    /// bytes of extra data.
    fn new_entry(&mut self, extra_size: usize, bounds: DeviceIntRect, data: &[u8]) {
        self.data.extend_from_slice(data);
        // Write 'end' to the index: the offset where the regular data ends and the extra data starts.
        self.index
            .extend_from_slice(convert_to_bytes(&(self.data.len() - extra_size)));
        // Write 'extra_end' to the index: the offset where the extra data ends.
        self.index.extend_from_slice(convert_to_bytes(&self.data.len()));
        // XXX: we can aggregate these writes
        // Write the bounds to the index.
        self.index.extend_from_slice(convert_to_bytes(&bounds.min.x));
        self.index.extend_from_slice(convert_to_bytes(&bounds.min.y));
        self.index.extend_from_slice(convert_to_bytes(&bounds.max.x));
        self.index.extend_from_slice(convert_to_bytes(&bounds.max.y));
    }

    /// Completes the blob image, producing a single buffer containing it.
    fn finish(mut self) -> Vec<u8> {
        // Append the index to the end of the buffer
        // and then append the offset to the beginning of the index.
        let index_begin = self.data.len();
        self.data.extend_from_slice(&self.index);
        self.data.extend_from_slice(convert_to_bytes(&index_begin));
        self.data
    }
}

/// Key for the `CachedReader` multi-map: item bounds plus an insertion
/// counter (`cache_order`) so items with identical bounds stay distinct and
/// keep their original relative order.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct CacheKey {
    x1: i32,
    y1: i32,
    x2: i32,
    y2: i32,
    cache_order: u32,
}

impl CacheKey {
    /// Builds a key from item bounds and its insertion counter.
    pub fn new(bounds: DeviceIntRect, cache_order: u32) -> Self {
        CacheKey {
            x1: bounds.min.x,
            y1: bounds.min.y,
            x2: bounds.max.x,
            y2: bounds.max.y,
            cache_order,
        }
    }
}

/// Provides an API for looking up the display items in a blob image by bounds, yielding items
/// with equal bounds in their original relative ordering.
///
/// This is used to implement `merge_blobs_images`.
///
/// We use a BTree as a kind of multi-map, by appending an integer "cache_order" to the key.
/// This lets us use multiple items with matching bounds in the map and allows
/// us to fetch and remove them while retaining the ordering of the original list.
struct CachedReader<'a> {
    /// Wrapped reader.
    reader: BlobReader<'a>,
    /// Cached entries that have been read but not yet requested by our consumer.
    cache: BTreeMap<CacheKey, Entry>,
    /// The current number of internally read display items, used to preserve list order.
    cache_index_counter: u32,
}

impl<'a> CachedReader<'a> {
    /// Creates a new CachedReader.
    pub fn new(buf: &'a [u8]) -> Self {
        CachedReader {
            reader: BlobReader::new(buf),
            cache: BTreeMap::new(),
            cache_index_counter: 0,
        }
    }

    /// Tries to find the given bounds in the cache of internally read items, removing it if found.
    ///
    /// The range query spans all `cache_order` values for these bounds; taking
    /// `.next()` yields the oldest (lowest counter) match, preserving list order.
    fn take_entry_with_bounds_from_cache(&mut self, bounds: &DeviceIntRect) -> Option<Entry> {
        if self.cache.is_empty() {
            return None;
        }

        let key_to_delete = match self
            .cache
            .range((
                Included(CacheKey::new(*bounds, 0u32)),
                Included(CacheKey::new(*bounds, std::u32::MAX)),
            ))
            .next()
        {
            Some((&key, _)) => key,
            None => return None,
        };

        Some(
            self.cache
                .remove(&key_to_delete)
                .expect("We just got this key from range, it needs to be present"),
        )
    }

    /// Yields the next item in the blob image with the given bounds.
    ///
    /// If the given bounds aren't found in the blob, this panics. `merge_blob_images` should
    /// avoid this by construction if the blob images are well-formed.
    pub fn next_entry_with_bounds(&mut self, bounds: &DeviceIntRect, ignore_rect: &DeviceIntRect) -> Entry {
        if let Some(entry) = self.take_entry_with_bounds_from_cache(bounds) {
            return entry;
        }

        loop {
            // This will panic if we run through the whole list without finding our bounds.
            let old = self.reader.read_entry();
            if old.bounds == *bounds {
                return old;
            } else if !ignore_rect.contains_box(&old.bounds) {
                // Not our item and not dirty: remember it for a later lookup.
                self.cache
                    .insert(CacheKey::new(old.bounds, self.cache_index_counter), old);
                self.cache_index_counter += 1;
            }
        }
    }
}

/// Merges a new partial blob image into an existing complete one.
///
/// A blob image represents a recording of the drawing commands needed to render
/// (part of) a display list. A partial blob image is a diff between the old display
/// list and a new one. It contains an entry for every display item in the new list, but
/// the actual drawing commands are missing for any item that isn't strictly contained
/// in the dirty rect. This is possible because not being contained in the dirty
/// rect implies that the item is unchanged between the old and new list, so we can
/// just grab the drawing commands from the old list.
///
/// The dirty rect strictly contains the bounds of every item that has been inserted
/// into or deleted from the old list to create the new list. (For simplicity
/// you may think of any other update as deleting and reinserting the item).
///
/// Partial blobs are based on gecko's "retained display list" system, and
/// in particular rely on one key property: if two items have overlapping bounds
/// and *aren't* contained in the dirty rect, then their relative order in both
/// the old and new list will not change. This lets us uniquely identify a display
/// item using only its bounds and relative order in the list.
///
/// That is, the first non-dirty item in the new list with bounds (10, 15, 100, 100)
/// is *also* the first non-dirty item in the old list with those bounds.
///
/// Note that *every* item contained inside the dirty rect will be fully recorded in
/// the new list, even if it is actually unchanged from the old list.
///
/// All of this together gives us a fairly simple merging algorithm: all we need
/// to do is walk through the new (partial) list, determine which of the two lists
/// has the recording for that item, and copy the recording into the result.
///
/// If an item is contained in the dirty rect, then the new list contains the
/// correct recording for that item, so we always copy it from there. Otherwise, we find
/// the first not-yet-copied item with those bounds in the old list and copy that.
/// Any items found in the old list but not the new one can be safely assumed to
/// have been deleted.
fn merge_blob_images(
    old_buf: &[u8],
    new_buf: &[u8],
    dirty_rect: DeviceIntRect,
    old_visible_rect: DeviceIntRect,
    new_visible_rect: DeviceIntRect,
) -> Vec<u8> {
    let mut result = BlobWriter::new();
    dlog!("dirty rect: {:?}", dirty_rect);
    dlog!("old:");
    dump_bounds(old_buf, dirty_rect);
    dlog!("new:");
    dump_bounds(new_buf, dirty_rect);
    dlog!("old visibile rect: {:?}", old_visible_rect);
    dlog!("new visibile rect: {:?}", new_visible_rect);

    let mut old_reader = CachedReader::new(old_buf);
    let mut new_reader = BlobReader::new(new_buf);
    // Only the area visible in both the old and new blob can carry reusable
    // recordings from the old blob.
    let preserved_rect = old_visible_rect.intersection_unchecked(&new_visible_rect);

    // Loop over both new and old entries merging them.
    // Both new and old must have the same number of entries that
    // overlap but are not contained by the dirty rect, and they
    // must be in the same order.
    while new_reader.reader.has_more() {
        let new = new_reader.read_entry();
        dlog!("bounds: {} {} {:?}", new.end, new.extra_end, new.bounds);
        let preserved_bounds = new.bounds.intersection_unchecked(&preserved_rect);
        if dirty_rect.contains_box(&preserved_bounds) {
            // Dirty item: the new (partial) blob carries its full recording.
            result.new_entry(new.extra_end - new.end, new.bounds, &new_buf[new.begin..new.extra_end]);
        } else {
            // Unchanged item: take the first not-yet-consumed recording with
            // these bounds from the old blob.
            let old = old_reader.next_entry_with_bounds(&new.bounds, &dirty_rect);
            result.new_entry(old.extra_end - old.end, new.bounds, &old_buf[old.begin..old.extra_end])
        }
    }

    // XXX: future work: ensure that items that have been deleted but aren't in the blob's visible
    // rect don't affect the dirty rect -- this allows us to scroll content out of view while only
    // updating the areas where items have been scrolled *into* view. This is very important for
    // the performance of blobs that are larger than the viewport. When this is done this
    // assertion will need to be modified to factor in the visible rect, or removed.

    // Ensure all remaining items will be discarded
    while old_reader.reader.reader.has_more() {
        let old = old_reader.reader.read_entry();
        dlog!("new bounds: {} {} {:?}", old.end, old.extra_end, old.bounds);
        //assert!(dirty_rect.contains_box(&old.bounds));
    }

    //assert!(old_reader.cache.is_empty());

    let result = result.finish();
    dump_index(&result);
    result
}

/// A font used by a blob image.
#[repr(C)]
#[derive(Copy, Clone)]
struct BlobFont {
    /// The font key.
    font_instance_key: FontInstanceKey,
    /// A pointer to the scaled font.
    scaled_font_ptr: u64,
}

/// A blob image and extra data provided by webrender on how to rasterize it.
#[derive(Clone)]
struct BlobCommand {
    /// The blob.
    data: Arc<BlobImageData>,
    /// What part of the blob should be rasterized (visible_rect's top-left corresponds to
    /// (0,0) in the blob's rasterization)
    visible_rect: DeviceIntRect,
    /// The size of the tiles to use in rasterization.
    tile_size: TileSize,
}

/// A unit of rasterization work handed to `rasterize_blob`.
struct Job {
    // Which (image, tile) this job rasterizes.
    request: BlobImageRequest,
    // Format and target rect for the output.
    descriptor: BlobImageDescriptor,
    // The recorded drawing commands to replay.
    commands: Arc<BlobImageData>,
    // Subregion that actually needs repainting.
    dirty_rect: BlobDirtyRect,
    // Visible rect of the blob (see BlobCommand::visible_rect).
    visible_rect: DeviceIntRect,
    // Tile size used for rasterization.
    tile_size: TileSize,
    // Destination pixel buffer, leased from the tile pool.
    output: MutableTileBuffer,
}

/// Rasterizes gecko blob images.
struct Moz2dBlobRasterizer {
    /// Pool of rasterizers.
    workers: Arc<ThreadPool>,
    /// Pool of low priority rasterizers.
    workers_low_priority: Arc<ThreadPool>,
    /// Blobs to rasterize.
    blob_commands: HashMap<BlobImageKey, BlobCommand>,
    /// Whether to dispatch work to the thread pools (see `rasterize`).
    enable_multithreading: bool,
}

impl AsyncBlobImageRasterizer for Moz2dBlobRasterizer {
    fn rasterize(
        &mut self,
        requests: &[BlobImageParams],
        low_priority: bool,
        tile_pool: &mut BlobTilePool,
    ) -> Vec<(BlobImageRequest, BlobImageResult)> {
        // All we do here is spin up our workers to callback into gecko to replay the drawing commands.
        gecko_profiler_label!(Graphics, Rasterization);
        auto_profiler_marker!(
            "BlobRasterization",
            gecko_profiler::gecko_profiler_category!(Graphics),
            Default::default()
        );

        let requests: Vec<Job> = requests
            .iter()
            .map(|params| {
                let command = &self.blob_commands[&params.request.key];
                let blob = Arc::clone(&command.data);
                assert!(!params.descriptor.rect.is_empty());

                let buf_size = (params.descriptor.rect.area() * params.descriptor.format.bytes_per_pixel()) as usize;

                Job {
                    request: params.request,
                    descriptor: params.descriptor,
                    commands: blob,
                    visible_rect: command.visible_rect,
                    dirty_rect: params.dirty_rect,
                    tile_size: command.tile_size,
                    output: tile_pool.get_buffer(buf_size),
                }
            })
            .collect();

        // If we don't have a lot of blobs it is probably not worth the initial cost
        // of installing work on rayon's thread pool so we do it serially on this thread.
        let should_parallelize = if !self.enable_multithreading {
            false
        } else if low_priority {
            requests.len() > 2
        } else {
            // For high priority requests we don't "risk" the potential priority inversion of
            // dispatching to a thread pool full of low priority jobs unless it is really
            // appealing.
            requests.len() > 4
        };

        let result = if should_parallelize {
            // Parallel version synchronously installs a job on the thread pool which will
            // try to do the work in parallel.
            // This thread is blocked until the thread pool is done doing the work.
            let lambda = || requests.into_par_iter().map(rasterize_blob).collect();
            if low_priority {
                //TODO --bpe runtime flag to A/B test these two
                self.workers_low_priority.install(lambda)
                //self.workers.install(lambda)
            } else {
                self.workers.install(lambda)
            }
        } else {
            requests.into_iter().map(rasterize_blob).collect()
        };

        result
    }
}

// a cross platform wrapper that creates an autorelease pool
// on macOS
fn autoreleasepool<T, F: FnOnce() -> T>(f: F) -> T {
    #[cfg(target_os = "macos")]
    {
        objc::rc::autoreleasepool(f)
    }
    #[cfg(not(target_os = "macos"))]
    {
        f()
    }
}

/// Replays one blob job's drawing commands into its output buffer via the
/// `wr_moz2d_render_cb` FFI callback, returning the rasterized result.
fn rasterize_blob(mut job: Job) -> (BlobImageRequest, BlobImageResult) {
    gecko_profiler_label!(Graphics, Rasterization);
    let descriptor = job.descriptor;

    // DirtyRect::All is passed to the callback as "no dirty rect" (None).
    let dirty_rect = match job.dirty_rect {
        DirtyRect::Partial(rect) => Some(rect),
        DirtyRect::All => None,
    };
    assert!(!descriptor.rect.is_empty());

    let request = job.request;

    let result = autoreleasepool(|| {
        unsafe {
            if wr_moz2d_render_cb(
                ByteSlice::new(&job.commands[..]),
                descriptor.format,
                &descriptor.rect,
                &job.visible_rect,
                job.tile_size,
                &request.tile,
                dirty_rect.as_ref(),
                MutByteSlice::new(job.output.as_mut_slice()),
            ) {
                // We want the dirty rect local to the tile rather than the whole image.
                // TODO(nical): move that up and avoid recomputing the tile bounds in the callback
                let dirty_rect = job.dirty_rect.to_subrect_of(&descriptor.rect);
                let tx: BlobToDeviceTranslation = (-descriptor.rect.min.to_vector()).into();
                let rasterized_rect = tx.transform_box(&dirty_rect);

                Ok(RasterizedBlobImage {
                    rasterized_rect,
                    data: job.output.into_arc(),
                })
            } else {
                panic!("Moz2D replay problem");
            }
        }
    });

    (request, result)
}

impl BlobImageHandler for Moz2dBlobImageHandler {
    fn create_similar(&self) -> Box<dyn BlobImageHandler> {
        Box::new(Self::new(
            Arc::clone(&self.workers),
            Arc::clone(&self.workers_low_priority),
        ))
    }

    fn add(&mut self, key: BlobImageKey, data: Arc<BlobImageData>, visible_rect: &DeviceIntRect, tile_size: TileSize) {
        {
            // Sanity check: a well-formed blob must contain at least one index entry.
            let index = BlobReader::new(&data);
            assert!(index.reader.has_more());
        }
        self.blob_commands.insert(
            key,
            BlobCommand {
                data: Arc::clone(&data),
                visible_rect: *visible_rect,
                tile_size,
            },
        );
    }

    fn update(
        &mut self,
        key: BlobImageKey,
        data: Arc<BlobImageData>,
        visible_rect: &DeviceIntRect,
        dirty_rect: &BlobDirtyRect,
    ) {
        match self.blob_commands.entry(key) {
            hash_map::Entry::Occupied(mut e) => {
                let command = e.get_mut();
                // Represent DirtyRect::All as the maximal i32 rect so the
                // merge below can treat both cases uniformly.
                let dirty_rect = if let DirtyRect::Partial(rect) = *dirty_rect {
                    rect.cast_unit()
                } else {
                    DeviceIntRect {
                        min: point2(i32::MIN, i32::MIN),
                        max: point2(i32::MAX, i32::MAX),
                    }
                };
                command.data = Arc::new(merge_blob_images(
                    &command.data,
                    &data,
                    dirty_rect,
                    command.visible_rect,
                    *visible_rect,
                ));
                command.visible_rect = *visible_rect;
            },
            _ => {
                panic!("missing image key");
            },
        }
    }

    fn delete(&mut self, key: BlobImageKey) {
        self.blob_commands.remove(&key);
    }

    fn create_blob_rasterizer(&mut self) -> Box<dyn
AsyncBlobImageRasterizer> { 700 Box::new(Moz2dBlobRasterizer { 701 workers: Arc::clone(&self.workers), 702 workers_low_priority: Arc::clone(&self.workers_low_priority), 703 blob_commands: self.blob_commands.clone(), 704 enable_multithreading: self.enable_multithreading, 705 }) 706 } 707 708 fn delete_font(&mut self, font: FontKey) { 709 unsafe { 710 DeleteFontData(font); 711 } 712 } 713 714 fn delete_font_instance(&mut self, key: FontInstanceKey) { 715 unsafe { 716 DeleteBlobFont(key); 717 } 718 } 719 720 fn clear_namespace(&mut self, namespace: IdNamespace) { 721 unsafe { 722 ClearBlobImageResources(namespace); 723 } 724 } 725 726 fn prepare_resources(&mut self, resources: &dyn BlobImageResources, requests: &[BlobImageParams]) { 727 for params in requests { 728 let commands = &self.blob_commands[¶ms.request.key]; 729 let blob = Arc::clone(&commands.data); 730 self.prepare_request(&blob, resources); 731 } 732 } 733 734 fn enable_multithreading(&mut self, enable: bool) { 735 self.enable_multithreading = enable; 736 } 737 } 738 739 use bindings::{WrFontInstanceKey, WrFontKey, WrIdNamespace}; 740 741 #[allow(improper_ctypes)] // this is needed so that rustc doesn't complain about passing the &Arc<Vec> to an extern function 742 extern "C" { 743 fn HasFontData(key: WrFontKey) -> bool; 744 fn AddFontData(key: WrFontKey, data: *const u8, size: usize, index: u32, vec: &ArcVecU8); 745 fn AddNativeFontHandle(key: WrFontKey, handle: *mut c_void, index: u32); 746 fn DeleteFontData(key: WrFontKey); 747 fn AddBlobFont( 748 instance_key: WrFontInstanceKey, 749 font_key: WrFontKey, 750 size: f32, 751 options: Option<&FontInstanceOptions>, 752 platform_options: Option<&FontInstancePlatformOptions>, 753 variations: *const FontVariation, 754 num_variations: usize, 755 ); 756 fn DeleteBlobFont(key: WrFontInstanceKey); 757 fn ClearBlobImageResources(namespace: WrIdNamespace); 758 #[cfg(target_os = "windows")] 759 fn gfx_wr_set_crash_annotation(annotation: CrashAnnotation, value: *const 
c_char); 760 } 761 762 impl Moz2dBlobImageHandler { 763 /// Create a new BlobImageHandler with the given thread pool. 764 pub fn new(workers: Arc<ThreadPool>, workers_low_priority: Arc<ThreadPool>) -> Self { 765 Moz2dBlobImageHandler { 766 blob_commands: HashMap::new(), 767 workers, 768 workers_low_priority, 769 enable_multithreading: true, 770 } 771 } 772 773 /// Does early preprocessing of a blob's resources. 774 /// 775 /// Currently just sets up fonts found in the blob. 776 fn prepare_request(&self, blob: &[u8], resources: &dyn BlobImageResources) { 777 #[cfg(target_os = "windows")] 778 fn maybe_crash_on_no_font_file(font_path: &PathBuf) { 779 if !mozbuild::config::NIGHTLY_BUILD { 780 return; 781 } 782 783 // On Nightly add annotation of the error and font file path then crash. We strip 784 // the user's dir if necessary to try to prevent capturing personal information. 785 let end_of_path = PathBuf::from_iter( 786 font_path 787 .components() 788 .skip_while(|c| !c.as_os_str().eq_ignore_ascii_case(OsStr::new("users"))) 789 .skip(2), 790 ); 791 // end_of_path will be empty if we don't find the Users dir. 
792 let annotation_path = if end_of_path.as_os_str().is_empty() { 793 font_path.as_os_str() 794 } else { 795 end_of_path.as_os_str() 796 }; 797 let annotation_string = format!( 798 "Error: {:x} loading: {}", 799 unsafe { GetLastError() }, 800 annotation_path.to_string_lossy() 801 ); 802 unsafe { 803 gfx_wr_set_crash_annotation( 804 CrashAnnotation::FontFile, 805 CStr::from_bytes_with_nul(annotation_string.as_bytes()) 806 .unwrap() 807 .as_ptr(), 808 ); 809 } 810 panic!("Moz2D font file not found"); 811 } 812 813 #[cfg(target_os = "windows")] 814 fn process_native_font_handle(key: FontKey, handle: &NativeFontHandle) { 815 if let Some(file) = dwrote::FontFile::new_from_path(&handle.path) { 816 if let Ok(face) = file.create_face(handle.index, dwrote::DWRITE_FONT_SIMULATIONS_NONE) { 817 unsafe { AddNativeFontHandle(key, face.as_ptr() as *mut c_void, 0) }; 818 return; 819 } 820 } 821 822 maybe_crash_on_no_font_file(&handle.path); 823 824 // Failed to open the font file? Try to set up a fallback font so that 825 // we don't simply crash, although text will be garbage. 826 let desc = dwrote::FontDescriptor { 827 family_name: "Arial".to_string(), 828 weight: dwrote::FontWeight::Regular, 829 stretch: dwrote::FontStretch::Normal, 830 style: dwrote::FontStyle::Normal, 831 }; 832 // If the returned font is None, give up. 833 // (TODO: try other font names? get an arbitrary font by index?) 
834 let font = dwrote::FontCollection::system() 835 .font_from_descriptor(&desc) 836 .unwrap() 837 .unwrap(); 838 let face = font.create_font_face(); 839 unsafe { AddNativeFontHandle(key, face.as_ptr() as *mut c_void, 0) }; 840 } 841 842 #[cfg(any(target_os = "macos", target_os = "ios"))] 843 fn process_native_font_handle(key: FontKey, handle: &NativeFontHandle) { 844 let font = match CGFont::from_name(&CFString::new(&handle.name)) { 845 Ok(font) => font, 846 Err(_) => { 847 // If for some reason we failed to load a font descriptor, then our 848 // only options are to either abort or substitute a fallback font. 849 // It is preferable to use a fallback font instead so that rendering 850 // can at least still proceed in some fashion without erroring. 851 // Lucida Grande is the fallback font in Gecko, so use that here. 852 CGFont::from_name(&CFString::from_static_string("Lucida Grande")) 853 .expect("Failed reading font descriptor and could not load fallback font") 854 }, 855 }; 856 unsafe { AddNativeFontHandle(key, font.as_ptr() as *mut c_void, 0) }; 857 } 858 859 #[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "windows")))] 860 fn process_native_font_handle(key: FontKey, handle: &NativeFontHandle) { 861 let cstr = CString::new(handle.path.as_os_str().as_bytes()).unwrap(); 862 unsafe { AddNativeFontHandle(key, cstr.as_ptr() as *mut c_void, handle.index) }; 863 } 864 865 fn process_fonts( 866 mut extra_data: BufReader, 867 resources: &dyn BlobImageResources, 868 unscaled_fonts: &mut Vec<FontKey>, 869 scaled_fonts: &mut Vec<FontInstanceKey>, 870 ) { 871 let font_count = extra_data.read_usize(); 872 for _ in 0..font_count { 873 let font = extra_data.read_blob_font(); 874 if scaled_fonts.contains(&font.font_instance_key) { 875 continue; 876 } 877 scaled_fonts.push(font.font_instance_key); 878 if let Some(instance) = resources.get_font_instance_data(font.font_instance_key) { 879 if !unscaled_fonts.contains(&instance.font_key) { 880 
unscaled_fonts.push(instance.font_key); 881 if !unsafe { HasFontData(instance.font_key) } { 882 let template = resources.get_font_data(instance.font_key).unwrap(); 883 match template { 884 FontTemplate::Raw(ref data, ref index) => unsafe { 885 AddFontData(instance.font_key, data.as_ptr(), data.len(), *index, data); 886 }, 887 FontTemplate::Native(ref handle) => { 888 process_native_font_handle(instance.font_key, handle); 889 }, 890 } 891 } 892 } 893 unsafe { 894 AddBlobFont( 895 font.font_instance_key, 896 instance.font_key, 897 instance.size, 898 instance.options.as_ref(), 899 instance.platform_options.as_ref(), 900 instance.variations.as_ptr(), 901 instance.variations.len(), 902 ); 903 } 904 } 905 } 906 } 907 908 { 909 let mut index = BlobReader::new(blob); 910 let mut unscaled_fonts = Vec::new(); 911 let mut scaled_fonts = Vec::new(); 912 while index.reader.pos < index.reader.buf.len() { 913 let e = index.read_entry(); 914 process_fonts( 915 BufReader::new(&blob[e.end..e.extra_end]), 916 resources, 917 &mut unscaled_fonts, 918 &mut scaled_fonts, 919 ); 920 } 921 } 922 } 923 }