tor-browser

The Tor Browser
git clone https://git.dasho.dev/tor-browser.git
Log | Files | Refs | README | LICENSE

WebRTCChild.sys.mjs (17722B)


      1 /* This Source Code Form is subject to the terms of the Mozilla Public
      2 * License, v. 2.0. If a copy of the MPL was not distributed with this
      3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
      4 
      5 import { XPCOMUtils } from "resource://gre/modules/XPCOMUtils.sys.mjs";
      6 import { AppConstants } from "resource://gre/modules/AppConstants.sys.mjs";
      7 
// Lazily-resolved XPCOM services live on this holder object; the media
// manager service is only instantiated the first time capture state is
// actually queried (see getTabStateForContentWindow below).
const lazy = {};
XPCOMUtils.defineLazyServiceGetter(
  lazy,
  "MediaManagerService",
  "@mozilla.org/mediaManagerService;1",
  Ci.nsIMediaManagerService
);

// Chrome URL of the main browser window. Capture notifications whose
// request URL matches this are previews shown by the browser UI itself
// and are deliberately ignored by the indicator-update code below.
const kBrowserURL = AppConstants.BROWSER_CHROME_URL;
     17 
     18 /**
     19 * GlobalMuteListener is a process-global object that listens for changes to
     20 * the global mute state of the camera and microphone. When it notices a
     21 * change in that state, it tells the underlying platform code to mute or
     22 * unmute those devices.
     23 */
     24 const GlobalMuteListener = {
     25  _initted: false,
     26 
     27  /**
     28   * Initializes the listener if it hasn't been already. This will also
     29   * ensure that the microphone and camera are initially in the right
     30   * muting state.
     31   */
     32  init() {
     33    if (!this._initted) {
     34      Services.cpmm.sharedData.addEventListener("change", this);
     35      this._updateCameraMuteState();
     36      this._updateMicrophoneMuteState();
     37      this._initted = true;
     38    }
     39  },
     40 
     41  handleEvent(event) {
     42    if (event.changedKeys.includes("WebRTC:GlobalCameraMute")) {
     43      this._updateCameraMuteState();
     44    }
     45    if (event.changedKeys.includes("WebRTC:GlobalMicrophoneMute")) {
     46      this._updateMicrophoneMuteState();
     47    }
     48  },
     49 
     50  _updateCameraMuteState() {
     51    let shouldMute = Services.cpmm.sharedData.get("WebRTC:GlobalCameraMute");
     52    let topic = shouldMute
     53      ? "getUserMedia:muteVideo"
     54      : "getUserMedia:unmuteVideo";
     55    Services.obs.notifyObservers(null, topic);
     56  },
     57 
     58  _updateMicrophoneMuteState() {
     59    let shouldMute = Services.cpmm.sharedData.get(
     60      "WebRTC:GlobalMicrophoneMute"
     61    );
     62    let topic = shouldMute
     63      ? "getUserMedia:muteAudio"
     64      : "getUserMedia:unmuteAudio";
     65 
     66    Services.obs.notifyObservers(null, topic);
     67  },
     68 };
     69 
     70 export class WebRTCChild extends JSWindowActorChild {
     71  actorCreated() {
     72    // The user might request that DOM notifications be silenced
     73    // when sharing the screen. There doesn't seem to be a great
     74    // way of storing that state in any of the objects going into
     75    // the WebRTC API or coming out via the observer notification
     76    // service, so we store it here on the actor.
     77    //
     78    // If the user chooses to silence notifications during screen
     79    // share, this will get set to true.
     80    this.suppressNotifications = false;
     81  }
     82 
     83  // Called only for 'unload' to remove pending gUM prompts in reloaded frames.
     84  static handleEvent(aEvent) {
     85    let contentWindow = aEvent.target.defaultView;
     86    let actor = getActorForWindow(contentWindow);
     87    if (actor) {
     88      for (let key of contentWindow.pendingGetUserMediaRequests.keys()) {
     89        actor.sendAsyncMessage("webrtc:CancelRequest", key);
     90      }
     91      for (let key of contentWindow.pendingPeerConnectionRequests.keys()) {
     92        actor.sendAsyncMessage("rtcpeer:CancelRequest", key);
     93      }
     94    }
     95  }
     96 
     97  // This observer is called from BrowserProcessChild to avoid
     98  // loading this module when WebRTC is not in use.
     99  static observe(aSubject, aTopic, aData) {
    100    switch (aTopic) {
    101      case "getUserMedia:request":
    102        handleGUMRequest(aSubject, aTopic, aData);
    103        break;
    104      case "recording-device-stopped":
    105        handleGUMStop(aSubject, aTopic, aData);
    106        break;
    107      case "PeerConnection:request":
    108        handlePCRequest(aSubject, aTopic, aData);
    109        break;
    110      case "recording-device-events":
    111        updateIndicators(aSubject, aTopic, aData);
    112        break;
    113      case "recording-window-ended":
    114        removeBrowserSpecificIndicator(aSubject, aTopic, aData);
    115        break;
    116    }
    117  }
    118 
    119  receiveMessage(aMessage) {
    120    switch (aMessage.name) {
    121      case "rtcpeer:Allow":
    122      case "rtcpeer:Deny": {
    123        let callID = aMessage.data.callID;
    124        let contentWindow = Services.wm.getOuterWindowWithId(
    125          aMessage.data.windowID
    126        );
    127        forgetPCRequest(contentWindow, callID);
    128        let topic =
    129          aMessage.name == "rtcpeer:Allow"
    130            ? "PeerConnection:response:allow"
    131            : "PeerConnection:response:deny";
    132        Services.obs.notifyObservers(null, topic, callID);
    133        break;
    134      }
    135      case "webrtc:Allow": {
    136        let callID = aMessage.data.callID;
    137        let contentWindow = Services.wm.getOuterWindowWithId(
    138          aMessage.data.windowID
    139        );
    140        let devices = contentWindow.pendingGetUserMediaRequests.get(callID);
    141        forgetGUMRequest(contentWindow, callID);
    142 
    143        let allowedDevices = Cc["@mozilla.org/array;1"].createInstance(
    144          Ci.nsIMutableArray
    145        );
    146        for (let deviceIndex of aMessage.data.devices) {
    147          allowedDevices.appendElement(devices[deviceIndex]);
    148        }
    149 
    150        Services.obs.notifyObservers(
    151          allowedDevices,
    152          "getUserMedia:response:allow",
    153          callID
    154        );
    155 
    156        this.suppressNotifications = !!aMessage.data.suppressNotifications;
    157 
    158        break;
    159      }
    160      case "webrtc:Deny":
    161        denyGUMRequest(aMessage.data);
    162        break;
    163      case "webrtc:StopSharing":
    164        Services.obs.notifyObservers(
    165          null,
    166          "getUserMedia:revoke",
    167          aMessage.data
    168        );
    169        break;
    170      case "webrtc:MuteCamera":
    171        Services.obs.notifyObservers(
    172          null,
    173          "getUserMedia:muteVideo",
    174          aMessage.data
    175        );
    176        break;
    177      case "webrtc:UnmuteCamera":
    178        Services.obs.notifyObservers(
    179          null,
    180          "getUserMedia:unmuteVideo",
    181          aMessage.data
    182        );
    183        break;
    184      case "webrtc:MuteMicrophone":
    185        Services.obs.notifyObservers(
    186          null,
    187          "getUserMedia:muteAudio",
    188          aMessage.data
    189        );
    190        break;
    191      case "webrtc:UnmuteMicrophone":
    192        Services.obs.notifyObservers(
    193          null,
    194          "getUserMedia:unmuteAudio",
    195          aMessage.data
    196        );
    197        break;
    198    }
    199  }
    200 }
    201 
    202 function getActorForWindow(window) {
    203  try {
    204    let windowGlobal = window.windowGlobalChild;
    205    if (windowGlobal) {
    206      return windowGlobal.getActor("WebRTC");
    207    }
    208  } catch (ex) {
    209    // There might not be an actor for a parent process chrome URL,
    210    // and we may not even be allowed to access its windowGlobalChild.
    211  }
    212 
    213  return null;
    214 }
    215 
    216 function handlePCRequest(aSubject) {
    217  let { windowID, innerWindowID, callID, isSecure } = aSubject;
    218  let contentWindow = Services.wm.getOuterWindowWithId(windowID);
    219  if (!contentWindow.pendingPeerConnectionRequests) {
    220    setupPendingListsInitially(contentWindow);
    221  }
    222  contentWindow.pendingPeerConnectionRequests.add(callID);
    223 
    224  let request = {
    225    windowID,
    226    innerWindowID,
    227    callID,
    228    documentURI: contentWindow.document.documentURI,
    229    secure: isSecure,
    230  };
    231 
    232  let actor = getActorForWindow(contentWindow);
    233  if (actor) {
    234    actor.sendAsyncMessage("rtcpeer:Request", request);
    235  }
    236 }
    237 
    238 function handleGUMStop(aSubject) {
    239  let contentWindow = Services.wm.getOuterWindowWithId(aSubject.windowID);
    240 
    241  let request = {
    242    windowID: aSubject.windowID,
    243    rawID: aSubject.rawID,
    244    mediaSource: aSubject.mediaSource,
    245  };
    246 
    247  let actor = getActorForWindow(contentWindow);
    248  if (actor) {
    249    actor.sendAsyncMessage("webrtc:StopRecording", request);
    250  }
    251 }
    252 
    253 function handleGUMRequest(aSubject) {
    254  // Now that a getUserMedia request has been created, we should check
    255  // to see if we're supposed to have any devices muted. This needs
    256  // to occur after the getUserMedia request is made, since the global
    257  // mute state is associated with the GetUserMediaWindowListener, which
    258  // is only created after a getUserMedia request.
    259  GlobalMuteListener.init();
    260 
    261  let constraints = aSubject.getConstraints();
    262  let contentWindow = Services.wm.getOuterWindowWithId(aSubject.windowID);
    263 
    264  prompt(
    265    aSubject.type,
    266    contentWindow,
    267    aSubject.windowID,
    268    aSubject.callID,
    269    constraints,
    270    aSubject.getAudioOutputOptions(),
    271    aSubject.devices,
    272    aSubject.isSecure,
    273    aSubject.isHandlingUserInput
    274  );
    275 }
    276 
/**
 * Filters the enumerated devices against the request's constraints, records
 * the pending request on the content window, and sends a "webrtc:Request"
 * message to the parent for the actual permission prompt.
 *
 * @param {string} aRequestType
 *        The gUM request type; compared against "selectaudiooutput" to
 *        decide whether speakers are offered.
 * @param {Window} aContentWindow
 *        The outer content window making the request.
 * @param {number} aWindowID
 *        Outer window ID of aContentWindow.
 * @param {string} aCallID
 *        Identifier correlating prompt responses with this request.
 * @param {Object} aConstraints
 *        MediaStreamConstraints from the request.
 * @param {Object} aAudioOutputOptions
 *        Audio output options; only .deviceId is forwarded.
 * @param {Array} aDevices
 *        Enumerated nsIMediaDevice candidates.
 * @param {boolean} aSecure
 *        Whether the requesting context is secure.
 * @param {boolean} aIsHandlingUserInput
 *        Whether the request came from a user gesture.
 */
function prompt(
  aRequestType,
  aContentWindow,
  aWindowID,
  aCallID,
  aConstraints,
  aAudioOutputOptions,
  aDevices,
  aSecure,
  aIsHandlingUserInput
) {
  let audioInputDevices = [];
  let videoInputDevices = [];
  let audioOutputDevices = [];
  // Devices offered to the prompt, in deviceIndex order; stored in the
  // pending-requests map so "webrtc:Allow" can resolve indices back to them.
  let devices = [];

  // MediaStreamConstraints defines video as 'boolean or MediaTrackConstraints'.
  let video = aConstraints.video || aConstraints.picture;
  let audio = aConstraints.audio;
  // A non-boolean mediaSource other than camera/microphone means display or
  // audio capture rather than plain device capture.
  let sharingScreen =
    video && typeof video != "boolean" && video.mediaSource != "camera";
  let sharingAudio =
    audio && typeof audio != "boolean" && audio.mediaSource != "microphone";

  // True when the constraint set pins a specific device (facingMode,
  // groupId, or a concrete non-"default" deviceId).
  const hasInherentConstraints = ({ facingMode, groupId, deviceId }) => {
    const id = [deviceId].flat()[0];
    return facingMode || groupId || (id && id != "default"); // flock workaround
  };
  let hasInherentAudioConstraints =
    audio &&
    !sharingAudio &&
    [audio, ...(audio.advanced || [])].some(hasInherentConstraints);
  let hasInherentVideoConstraints =
    video &&
    !sharingScreen &&
    [video, ...(video.advanced || [])].some(hasInherentConstraints);

  for (let device of aDevices) {
    device = device.QueryInterface(Ci.nsIMediaDevice);
    let deviceObject = {
      name: device.rawName, // unfiltered device name to show to the user
      deviceIndex: devices.length,
      rawId: device.rawId,
      id: device.id,
      mediaSource: device.mediaSource,
      canRequestOsLevelPrompt: device.canRequestOsLevelPrompt,
    };
    switch (device.type) {
      case "audioinput":
        // Check that if we got a microphone, we have not requested an audio
        // capture, and if we have requested an audio capture, we are not
        // getting a microphone instead.
        if (audio && (device.mediaSource == "microphone") != sharingAudio) {
          audioInputDevices.push(deviceObject);
          devices.push(device);
        }
        break;
      case "videoinput":
        // Verify that if we got a camera, we haven't requested a screen share,
        // or that if we requested a screen share we aren't getting a camera.
        if (video && (device.mediaSource == "camera") != sharingScreen) {
          if (device.scary) {
            deviceObject.scary = true;
          }
          videoInputDevices.push(deviceObject);
          devices.push(device);
        }
        break;
      case "audiooutput":
        if (aRequestType == "selectaudiooutput") {
          audioOutputDevices.push(deviceObject);
          devices.push(device);
        }
        break;
    }
  }

  // Human-readable request categories shown/checked by the parent prompt.
  let requestTypes = [];
  if (videoInputDevices.length) {
    requestTypes.push(sharingScreen ? "Screen" : "Camera");
  }
  if (audioInputDevices.length) {
    requestTypes.push(sharingAudio ? "AudioCapture" : "Microphone");
  }
  if (audioOutputDevices.length) {
    requestTypes.push("Speaker");
  }

  if (!requestTypes.length) {
    // Device enumeration is done ahead of handleGUMRequest, so we're not
    // responsible for handling the NotFoundError spec case.
    denyGUMRequest({ callID: aCallID });
    return;
  }

  if (!aContentWindow.pendingGetUserMediaRequests) {
    setupPendingListsInitially(aContentWindow);
  }
  aContentWindow.pendingGetUserMediaRequests.set(aCallID, devices);

  // WebRTC prompts have a bunch of special requirements, such as being able to
  // grant two permissions (microphone and camera), selecting devices and showing
  // a screen sharing preview. All this could have probably been baked into
  // nsIContentPermissionRequest prompts, but the team that implemented this back
  // then chose to just build their own prompting mechanism instead.
  //
  // So, what you are looking at here is not a real nsIContentPermissionRequest, but
  // something that looks really similar and will be transmitted to webrtcUI.sys.mjs
  // for showing the prompt.
  // Note that we basically do the permission delegate check in
  // nsIContentPermissionRequest, but because webrtc uses their own prompting
  // system, we should manually apply the delegate policy here. Permission
  // should be delegated using Feature Policy and top principal
  const permDelegateHandler =
    aContentWindow.document.permDelegateHandler.QueryInterface(
      Ci.nsIPermissionDelegateHandler
    );

  let secondOrigin = undefined;
  if (permDelegateHandler.maybeUnsafePermissionDelegate(requestTypes)) {
    // We are going to prompt both first party and third party origin.
    // SecondOrigin should be third party
    secondOrigin = aContentWindow.document.nodePrincipal.origin;
  }

  let request = {
    callID: aCallID,
    windowID: aWindowID,
    secondOrigin,
    documentURI: aContentWindow.document.documentURI,
    secure: aSecure,
    isHandlingUserInput: aIsHandlingUserInput,
    requestTypes,
    sharingScreen,
    sharingAudio,
    audioInputDevices,
    videoInputDevices,
    audioOutputDevices,
    hasInherentAudioConstraints,
    hasInherentVideoConstraints,
    audioOutputId: aAudioOutputOptions.deviceId,
  };

  let actor = getActorForWindow(aContentWindow);
  if (actor) {
    actor.sendAsyncMessage("webrtc:Request", request);
  }
}
    425 
    426 function denyGUMRequest(aData) {
    427  let subject;
    428  if (aData.noOSPermission) {
    429    subject = "getUserMedia:response:noOSPermission";
    430  } else {
    431    subject = "getUserMedia:response:deny";
    432  }
    433  Services.obs.notifyObservers(null, subject, aData.callID);
    434 
    435  if (!aData.windowID) {
    436    return;
    437  }
    438  let contentWindow = Services.wm.getOuterWindowWithId(aData.windowID);
    439  if (contentWindow.pendingGetUserMediaRequests) {
    440    forgetGUMRequest(contentWindow, aData.callID);
    441  }
    442 }
    443 
    444 function forgetGUMRequest(aContentWindow, aCallID) {
    445  aContentWindow.pendingGetUserMediaRequests.delete(aCallID);
    446  forgetPendingListsEventually(aContentWindow);
    447 }
    448 
    449 function forgetPCRequest(aContentWindow, aCallID) {
    450  aContentWindow.pendingPeerConnectionRequests.delete(aCallID);
    451  forgetPendingListsEventually(aContentWindow);
    452 }
    453 
    454 function setupPendingListsInitially(aContentWindow) {
    455  if (aContentWindow.pendingGetUserMediaRequests) {
    456    return;
    457  }
    458  aContentWindow.pendingGetUserMediaRequests = new Map();
    459  aContentWindow.pendingPeerConnectionRequests = new Set();
    460  aContentWindow.addEventListener("unload", WebRTCChild.handleEvent);
    461 }
    462 
    463 function forgetPendingListsEventually(aContentWindow) {
    464  if (
    465    aContentWindow.pendingGetUserMediaRequests.size ||
    466    aContentWindow.pendingPeerConnectionRequests.size
    467  ) {
    468    return;
    469  }
    470  aContentWindow.pendingGetUserMediaRequests = null;
    471  aContentWindow.pendingPeerConnectionRequests = null;
    472  aContentWindow.removeEventListener("unload", WebRTCChild.handleEvent);
    473 }
    474 
    475 function updateIndicators(aSubject) {
    476  if (
    477    aSubject instanceof Ci.nsIPropertyBag &&
    478    aSubject.getProperty("requestURL") == kBrowserURL
    479  ) {
    480    // Ignore notifications caused by the browser UI showing previews.
    481    return;
    482  }
    483 
    484  let contentWindow = aSubject.getProperty("window");
    485 
    486  let actor = contentWindow ? getActorForWindow(contentWindow) : null;
    487  if (actor) {
    488    let tabState = getTabStateForContentWindow(contentWindow, false);
    489    tabState.windowId = getInnerWindowIDForWindow(contentWindow);
    490 
    491    // If we were silencing DOM notifications before, but we've updated
    492    // state such that we're no longer sharing one of our displays, then
    493    // reset the silencing state.
    494    if (actor.suppressNotifications) {
    495      if (!tabState.screen && !tabState.window && !tabState.browser) {
    496        actor.suppressNotifications = false;
    497      }
    498    }
    499 
    500    tabState.suppressNotifications = actor.suppressNotifications;
    501 
    502    actor.sendAsyncMessage("webrtc:UpdateIndicators", tabState);
    503  }
    504 }
    505 
    506 function removeBrowserSpecificIndicator(aSubject, aTopic, aData) {
    507  let contentWindow = Services.wm.getOuterWindowWithId(aData);
    508  if (contentWindow.document.documentURI == kBrowserURL) {
    509    // Ignore notifications caused by the browser UI showing previews.
    510    return;
    511  }
    512 
    513  let tabState = getTabStateForContentWindow(contentWindow, true);
    514 
    515  tabState.windowId = aData;
    516 
    517  let actor = getActorForWindow(contentWindow);
    518  if (actor) {
    519    actor.sendAsyncMessage("webrtc:UpdateIndicators", tabState);
    520  }
    521 }
    522 
    523 function getTabStateForContentWindow(aContentWindow, aForRemove = false) {
    524  let camera = {},
    525    microphone = {},
    526    screen = {},
    527    window = {},
    528    browser = {},
    529    devices = {};
    530  lazy.MediaManagerService.mediaCaptureWindowState(
    531    aContentWindow,
    532    camera,
    533    microphone,
    534    screen,
    535    window,
    536    browser,
    537    devices
    538  );
    539 
    540  if (
    541    camera.value == lazy.MediaManagerService.STATE_NOCAPTURE &&
    542    microphone.value == lazy.MediaManagerService.STATE_NOCAPTURE &&
    543    screen.value == lazy.MediaManagerService.STATE_NOCAPTURE &&
    544    window.value == lazy.MediaManagerService.STATE_NOCAPTURE &&
    545    browser.value == lazy.MediaManagerService.STATE_NOCAPTURE
    546  ) {
    547    return { remove: true };
    548  }
    549 
    550  if (aForRemove) {
    551    return { remove: true };
    552  }
    553 
    554  let serializedDevices = [];
    555  if (Array.isArray(devices.value)) {
    556    serializedDevices = devices.value.map(device => {
    557      return {
    558        type: device.type,
    559        mediaSource: device.mediaSource,
    560        rawId: device.rawId,
    561        scary: device.scary,
    562      };
    563    });
    564  }
    565 
    566  return {
    567    camera: camera.value,
    568    microphone: microphone.value,
    569    screen: screen.value,
    570    window: window.value,
    571    browser: browser.value,
    572    devices: serializedDevices,
    573  };
    574 }
    575 
    576 function getInnerWindowIDForWindow(aContentWindow) {
    577  return aContentWindow.windowGlobalChild.innerWindowId;
    578 }