audio_device_mac.cc (81068B)
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "modules/audio_device/mac/audio_device_mac.h"

#include <ApplicationServices/ApplicationServices.h>
#include <mach/mach.h>     // mach_task_self()
#include <sys/sysctl.h>    // sysctlbyname()

#include <memory>
#include <vector>

#include "modules/third_party/portaudio/pa_ringbuffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/platform_thread.h"
#include "rtc_base/system/arch.h"

namespace webrtc {

// Convenience wrappers for CoreAudio calls. Every call site must have a local
// `OSStatus err` in scope: each macro assigns the call's result to `err` and,
// on failure, logs the four-byte status code as its conventional
// four-character tag via logCAMsg().

// Log at ERROR severity and return -1 from the enclosing function on failure.
#define WEBRTC_CA_RETURN_ON_ERR(expr)                                     \
  do {                                                                    \
    err = expr;                                                           \
    if (err != noErr) {                                                   \
      logCAMsg(::webrtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
      return -1;                                                          \
    }                                                                     \
  } while (0)

// Log at ERROR severity on failure, but continue execution.
#define WEBRTC_CA_LOG_ERR(expr)                                           \
  do {                                                                    \
    err = expr;                                                           \
    if (err != noErr) {                                                   \
      logCAMsg(::webrtc::LS_ERROR, "Error in " #expr, (const char*)&err); \
    }                                                                     \
  } while (0)

// Log at WARNING severity on failure, but continue execution.
#define WEBRTC_CA_LOG_WARN(expr)                                            \
  do {                                                                      \
    err = expr;                                                             \
    if (err != noErr) {                                                     \
      logCAMsg(::webrtc::LS_WARNING, "Error in " #expr, (const char*)&err); \
    }                                                                       \
  } while (0)

// Upper bound on how many audio devices are queried from the HAL at once.
enum { MaxNumberDevices = 64 };

// CoreAudio errors are best interpreted as four character strings.
55 void AudioDeviceMac::logCAMsg(const LoggingSeverity sev, 56 const char* msg, 57 const char* err) { 58 RTC_DCHECK(msg != NULL); 59 RTC_DCHECK(err != NULL); 60 61 #ifdef WEBRTC_ARCH_BIG_ENDIAN 62 switch (sev) { 63 case LS_ERROR: 64 RTC_LOG(LS_ERROR) << msg << ": " << err[0] << err[1] << err[2] << err[3]; 65 break; 66 case LS_WARNING: 67 RTC_LOG(LS_WARNING) << msg << ": " << err[0] << err[1] << err[2] 68 << err[3]; 69 break; 70 case LS_VERBOSE: 71 RTC_LOG(LS_VERBOSE) << msg << ": " << err[0] << err[1] << err[2] 72 << err[3]; 73 break; 74 default: 75 break; 76 } 77 #else 78 // We need to flip the characters in this case. 79 switch (sev) { 80 case LS_ERROR: 81 RTC_LOG(LS_ERROR) << msg << ": " << err[3] << err[2] << err[1] << err[0]; 82 break; 83 case LS_WARNING: 84 RTC_LOG(LS_WARNING) << msg << ": " << err[3] << err[2] << err[1] 85 << err[0]; 86 break; 87 case LS_VERBOSE: 88 RTC_LOG(LS_VERBOSE) << msg << ": " << err[3] << err[2] << err[1] 89 << err[0]; 90 break; 91 default: 92 break; 93 } 94 #endif 95 } 96 97 AudioDeviceMac::AudioDeviceMac() 98 : _ptrAudioBuffer(NULL), 99 _mixerManager(), 100 _inputDeviceIndex(0), 101 _outputDeviceIndex(0), 102 _inputDeviceID(kAudioObjectUnknown), 103 _outputDeviceID(kAudioObjectUnknown), 104 _inputDeviceIsSpecified(false), 105 _outputDeviceIsSpecified(false), 106 _recChannels(N_REC_CHANNELS), 107 _playChannels(N_PLAY_CHANNELS), 108 _captureBufData(NULL), 109 _renderBufData(NULL), 110 _initialized(false), 111 _isShutDown(false), 112 _recording(false), 113 _playing(false), 114 _recIsInitialized(false), 115 _playIsInitialized(false), 116 _renderDeviceIsAlive(1), 117 _captureDeviceIsAlive(1), 118 _twoDevices(true), 119 _doStop(false), 120 _doStopRec(false), 121 _macBookPro(false), 122 _macBookProPanRight(false), 123 _captureLatencyUs(0), 124 _renderLatencyUs(0), 125 _captureDelayUs(0), 126 _renderDelayUs(0), 127 _renderDelayOffsetSamples(0), 128 _paCaptureBuffer(NULL), 129 _paRenderBuffer(NULL), 130 _captureBufSizeSamples(0), 131 
_renderBufSizeSamples(0), 132 prev_key_state_() { 133 RTC_DLOG(LS_INFO) << __FUNCTION__ << " created"; 134 135 memset(_renderConvertData, 0, sizeof(_renderConvertData)); 136 memset(&_outStreamFormat, 0, sizeof(AudioStreamBasicDescription)); 137 memset(&_outDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); 138 memset(&_inStreamFormat, 0, sizeof(AudioStreamBasicDescription)); 139 memset(&_inDesiredFormat, 0, sizeof(AudioStreamBasicDescription)); 140 } 141 142 AudioDeviceMac::~AudioDeviceMac() { 143 RTC_DLOG(LS_INFO) << __FUNCTION__ << " destroyed"; 144 145 if (!_isShutDown) { 146 Terminate(); 147 } 148 149 RTC_DCHECK(capture_worker_thread_.empty()); 150 RTC_DCHECK(render_worker_thread_.empty()); 151 152 if (_paRenderBuffer) { 153 delete _paRenderBuffer; 154 _paRenderBuffer = NULL; 155 } 156 157 if (_paCaptureBuffer) { 158 delete _paCaptureBuffer; 159 _paCaptureBuffer = NULL; 160 } 161 162 if (_renderBufData) { 163 delete[] _renderBufData; 164 _renderBufData = NULL; 165 } 166 167 if (_captureBufData) { 168 delete[] _captureBufData; 169 _captureBufData = NULL; 170 } 171 172 kern_return_t kernErr = KERN_SUCCESS; 173 kernErr = semaphore_destroy(mach_task_self(), _renderSemaphore); 174 if (kernErr != KERN_SUCCESS) { 175 RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr; 176 } 177 178 kernErr = semaphore_destroy(mach_task_self(), _captureSemaphore); 179 if (kernErr != KERN_SUCCESS) { 180 RTC_LOG(LS_ERROR) << "semaphore_destroy() error: " << kernErr; 181 } 182 } 183 184 // ============================================================================ 185 // API 186 // ============================================================================ 187 188 void AudioDeviceMac::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) { 189 MutexLock lock(&mutex_); 190 191 _ptrAudioBuffer = audioBuffer; 192 193 // inform the AudioBuffer about default settings for this implementation 194 _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC); 195 
_ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC); 196 _ptrAudioBuffer->SetRecordingChannels(N_REC_CHANNELS); 197 _ptrAudioBuffer->SetPlayoutChannels(N_PLAY_CHANNELS); 198 } 199 200 int32_t AudioDeviceMac::ActiveAudioLayer( 201 AudioDeviceModule::AudioLayer& audioLayer) const { 202 audioLayer = AudioDeviceModule::kPlatformDefaultAudio; 203 return 0; 204 } 205 206 AudioDeviceGeneric::InitStatus AudioDeviceMac::Init() { 207 MutexLock lock(&mutex_); 208 209 if (_initialized) { 210 return InitStatus::OK; 211 } 212 213 OSStatus err = noErr; 214 215 _isShutDown = false; 216 217 // PortAudio ring buffers require an elementCount which is a power of two. 218 if (_renderBufData == NULL) { 219 UInt32 powerOfTwo = 1; 220 while (powerOfTwo < PLAY_BUF_SIZE_IN_SAMPLES) { 221 powerOfTwo <<= 1; 222 } 223 _renderBufSizeSamples = powerOfTwo; 224 _renderBufData = new SInt16[_renderBufSizeSamples]; 225 } 226 227 if (_paRenderBuffer == NULL) { 228 _paRenderBuffer = new PaUtilRingBuffer; 229 ring_buffer_size_t bufSize = -1; 230 bufSize = PaUtil_InitializeRingBuffer( 231 _paRenderBuffer, sizeof(SInt16), _renderBufSizeSamples, _renderBufData); 232 if (bufSize == -1) { 233 RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error"; 234 return InitStatus::PLAYOUT_ERROR; 235 } 236 } 237 238 if (_captureBufData == NULL) { 239 UInt32 powerOfTwo = 1; 240 while (powerOfTwo < REC_BUF_SIZE_IN_SAMPLES) { 241 powerOfTwo <<= 1; 242 } 243 _captureBufSizeSamples = powerOfTwo; 244 _captureBufData = new Float32[_captureBufSizeSamples]; 245 } 246 247 if (_paCaptureBuffer == NULL) { 248 _paCaptureBuffer = new PaUtilRingBuffer; 249 ring_buffer_size_t bufSize = -1; 250 bufSize = 251 PaUtil_InitializeRingBuffer(_paCaptureBuffer, sizeof(Float32), 252 _captureBufSizeSamples, _captureBufData); 253 if (bufSize == -1) { 254 RTC_LOG(LS_ERROR) << "PaUtil_InitializeRingBuffer() error"; 255 return InitStatus::RECORDING_ERROR; 256 } 257 } 258 259 kern_return_t kernErr = KERN_SUCCESS; 260 kernErr = 
semaphore_create(mach_task_self(), &_renderSemaphore, 261 SYNC_POLICY_FIFO, 0); 262 if (kernErr != KERN_SUCCESS) { 263 RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr; 264 return InitStatus::OTHER_ERROR; 265 } 266 267 kernErr = semaphore_create(mach_task_self(), &_captureSemaphore, 268 SYNC_POLICY_FIFO, 0); 269 if (kernErr != KERN_SUCCESS) { 270 RTC_LOG(LS_ERROR) << "semaphore_create() error: " << kernErr; 271 return InitStatus::OTHER_ERROR; 272 } 273 274 // Setting RunLoop to NULL here instructs HAL to manage its own thread for 275 // notifications. This was the default behaviour on OS X 10.5 and earlier, 276 // but now must be explicitly specified. HAL would otherwise try to use the 277 // main thread to issue notifications. 278 AudioObjectPropertyAddress propertyAddress = { 279 kAudioHardwarePropertyRunLoop, kAudioObjectPropertyScopeGlobal, 280 kAudioObjectPropertyElementMain}; 281 CFRunLoopRef runLoop = NULL; 282 UInt32 size = sizeof(CFRunLoopRef); 283 int aoerr = AudioObjectSetPropertyData( 284 kAudioObjectSystemObject, &propertyAddress, 0, NULL, size, &runLoop); 285 if (aoerr != noErr) { 286 RTC_LOG(LS_ERROR) << "Error in AudioObjectSetPropertyData: " 287 << (const char*)&aoerr; 288 return InitStatus::OTHER_ERROR; 289 } 290 291 // Listen for any device changes. 
292 propertyAddress.mSelector = kAudioHardwarePropertyDevices; 293 WEBRTC_CA_LOG_ERR(AudioObjectAddPropertyListener( 294 kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this)); 295 296 // Determine if this is a MacBook Pro 297 _macBookPro = false; 298 _macBookProPanRight = false; 299 char buf[128]; 300 size_t length = sizeof(buf); 301 memset(buf, 0, length); 302 303 int intErr = sysctlbyname("hw.model", buf, &length, NULL, 0); 304 if (intErr != 0) { 305 RTC_LOG(LS_ERROR) << "Error in sysctlbyname(): " << err; 306 } else { 307 RTC_LOG(LS_VERBOSE) << "Hardware model: " << buf; 308 if (strncmp(buf, "MacBookPro", 10) == 0) { 309 _macBookPro = true; 310 } 311 } 312 313 _initialized = true; 314 315 return InitStatus::OK; 316 } 317 318 int32_t AudioDeviceMac::Terminate() { 319 if (!_initialized) { 320 return 0; 321 } 322 323 if (_recording) { 324 RTC_LOG(LS_ERROR) << "Recording must be stopped"; 325 return -1; 326 } 327 328 if (_playing) { 329 RTC_LOG(LS_ERROR) << "Playback must be stopped"; 330 return -1; 331 } 332 333 MutexLock lock(&mutex_); 334 _mixerManager.Close(); 335 336 OSStatus err = noErr; 337 int retVal = 0; 338 339 AudioObjectPropertyAddress propertyAddress = { 340 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, 341 kAudioObjectPropertyElementMain}; 342 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( 343 kAudioObjectSystemObject, &propertyAddress, &objectListenerProc, this)); 344 345 err = AudioHardwareUnload(); 346 if (err != noErr) { 347 logCAMsg(LS_ERROR, "Error in AudioHardwareUnload()", (const char*)&err); 348 retVal = -1; 349 } 350 351 _isShutDown = true; 352 _initialized = false; 353 _outputDeviceIsSpecified = false; 354 _inputDeviceIsSpecified = false; 355 356 return retVal; 357 } 358 359 bool AudioDeviceMac::Initialized() const { 360 return (_initialized); 361 } 362 363 int32_t AudioDeviceMac::SpeakerIsAvailable(bool& available) { 364 MutexLock lock(&mutex_); 365 return SpeakerIsAvailableLocked(available); 366 } 
367 368 int32_t AudioDeviceMac::SpeakerIsAvailableLocked(bool& available) { 369 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); 370 371 // Make an attempt to open up the 372 // output mixer corresponding to the currently selected output device. 373 // 374 if (!wasInitialized && InitSpeakerLocked() == -1) { 375 available = false; 376 return 0; 377 } 378 379 // Given that InitSpeaker was successful, we know that a valid speaker 380 // exists. 381 available = true; 382 383 // Close the initialized output mixer 384 // 385 if (!wasInitialized) { 386 _mixerManager.CloseSpeaker(); 387 } 388 389 return 0; 390 } 391 392 int32_t AudioDeviceMac::InitSpeaker() { 393 MutexLock lock(&mutex_); 394 return InitSpeakerLocked(); 395 } 396 397 int32_t AudioDeviceMac::InitSpeakerLocked() { 398 if (_playing) { 399 return -1; 400 } 401 402 if (InitDevice(_outputDeviceIndex, _outputDeviceID, false) == -1) { 403 return -1; 404 } 405 406 if (_inputDeviceID == _outputDeviceID) { 407 _twoDevices = false; 408 } else { 409 _twoDevices = true; 410 } 411 412 if (_mixerManager.OpenSpeaker(_outputDeviceID) == -1) { 413 return -1; 414 } 415 416 return 0; 417 } 418 419 int32_t AudioDeviceMac::MicrophoneIsAvailable(bool& available) { 420 MutexLock lock(&mutex_); 421 return MicrophoneIsAvailableLocked(available); 422 } 423 424 int32_t AudioDeviceMac::MicrophoneIsAvailableLocked(bool& available) { 425 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); 426 427 // Make an attempt to open up the 428 // input mixer corresponding to the currently selected output device. 429 // 430 if (!wasInitialized && InitMicrophoneLocked() == -1) { 431 available = false; 432 return 0; 433 } 434 435 // Given that InitMicrophone was successful, we know that a valid microphone 436 // exists. 
437 available = true; 438 439 // Close the initialized input mixer 440 // 441 if (!wasInitialized) { 442 _mixerManager.CloseMicrophone(); 443 } 444 445 return 0; 446 } 447 448 int32_t AudioDeviceMac::InitMicrophone() { 449 MutexLock lock(&mutex_); 450 return InitMicrophoneLocked(); 451 } 452 453 int32_t AudioDeviceMac::InitMicrophoneLocked() { 454 if (_recording) { 455 return -1; 456 } 457 458 if (InitDevice(_inputDeviceIndex, _inputDeviceID, true) == -1) { 459 return -1; 460 } 461 462 if (_inputDeviceID == _outputDeviceID) { 463 _twoDevices = false; 464 } else { 465 _twoDevices = true; 466 } 467 468 if (_mixerManager.OpenMicrophone(_inputDeviceID) == -1) { 469 return -1; 470 } 471 472 return 0; 473 } 474 475 bool AudioDeviceMac::SpeakerIsInitialized() const { 476 return (_mixerManager.SpeakerIsInitialized()); 477 } 478 479 bool AudioDeviceMac::MicrophoneIsInitialized() const { 480 return (_mixerManager.MicrophoneIsInitialized()); 481 } 482 483 int32_t AudioDeviceMac::SpeakerVolumeIsAvailable(bool& available) { 484 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); 485 486 // Make an attempt to open up the 487 // output mixer corresponding to the currently selected output device. 488 // 489 if (!wasInitialized && InitSpeaker() == -1) { 490 // If we end up here it means that the selected speaker has no volume 491 // control. 
492 available = false; 493 return 0; 494 } 495 496 // Given that InitSpeaker was successful, we know that a volume control exists 497 // 498 available = true; 499 500 // Close the initialized output mixer 501 // 502 if (!wasInitialized) { 503 _mixerManager.CloseSpeaker(); 504 } 505 506 return 0; 507 } 508 509 int32_t AudioDeviceMac::SetSpeakerVolume(uint32_t volume) { 510 return (_mixerManager.SetSpeakerVolume(volume)); 511 } 512 513 int32_t AudioDeviceMac::SpeakerVolume(uint32_t& volume) const { 514 uint32_t level(0); 515 516 if (_mixerManager.SpeakerVolume(level) == -1) { 517 return -1; 518 } 519 520 volume = level; 521 return 0; 522 } 523 524 int32_t AudioDeviceMac::MaxSpeakerVolume(uint32_t& maxVolume) const { 525 uint32_t maxVol(0); 526 527 if (_mixerManager.MaxSpeakerVolume(maxVol) == -1) { 528 return -1; 529 } 530 531 maxVolume = maxVol; 532 return 0; 533 } 534 535 int32_t AudioDeviceMac::MinSpeakerVolume(uint32_t& minVolume) const { 536 uint32_t minVol(0); 537 538 if (_mixerManager.MinSpeakerVolume(minVol) == -1) { 539 return -1; 540 } 541 542 minVolume = minVol; 543 return 0; 544 } 545 546 int32_t AudioDeviceMac::SpeakerMuteIsAvailable(bool& available) { 547 bool isAvailable(false); 548 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); 549 550 // Make an attempt to open up the 551 // output mixer corresponding to the currently selected output device. 552 // 553 if (!wasInitialized && InitSpeaker() == -1) { 554 // If we end up here it means that the selected speaker has no volume 555 // control, hence it is safe to state that there is no mute control 556 // already at this stage. 
557 available = false; 558 return 0; 559 } 560 561 // Check if the selected speaker has a mute control 562 // 563 _mixerManager.SpeakerMuteIsAvailable(isAvailable); 564 565 available = isAvailable; 566 567 // Close the initialized output mixer 568 // 569 if (!wasInitialized) { 570 _mixerManager.CloseSpeaker(); 571 } 572 573 return 0; 574 } 575 576 int32_t AudioDeviceMac::SetSpeakerMute(bool enable) { 577 return (_mixerManager.SetSpeakerMute(enable)); 578 } 579 580 int32_t AudioDeviceMac::SpeakerMute(bool& enabled) const { 581 bool muted(0); 582 583 if (_mixerManager.SpeakerMute(muted) == -1) { 584 return -1; 585 } 586 587 enabled = muted; 588 return 0; 589 } 590 591 int32_t AudioDeviceMac::MicrophoneMuteIsAvailable(bool& available) { 592 bool isAvailable(false); 593 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); 594 595 // Make an attempt to open up the 596 // input mixer corresponding to the currently selected input device. 597 // 598 if (!wasInitialized && InitMicrophone() == -1) { 599 // If we end up here it means that the selected microphone has no volume 600 // control, hence it is safe to state that there is no boost control 601 // already at this stage. 
602 available = false; 603 return 0; 604 } 605 606 // Check if the selected microphone has a mute control 607 // 608 _mixerManager.MicrophoneMuteIsAvailable(isAvailable); 609 available = isAvailable; 610 611 // Close the initialized input mixer 612 // 613 if (!wasInitialized) { 614 _mixerManager.CloseMicrophone(); 615 } 616 617 return 0; 618 } 619 620 int32_t AudioDeviceMac::SetMicrophoneMute(bool enable) { 621 return (_mixerManager.SetMicrophoneMute(enable)); 622 } 623 624 int32_t AudioDeviceMac::MicrophoneMute(bool& enabled) const { 625 bool muted(0); 626 627 if (_mixerManager.MicrophoneMute(muted) == -1) { 628 return -1; 629 } 630 631 enabled = muted; 632 return 0; 633 } 634 635 int32_t AudioDeviceMac::StereoRecordingIsAvailable(bool& available) { 636 bool isAvailable(false); 637 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); 638 639 if (!wasInitialized && InitMicrophone() == -1) { 640 // Cannot open the specified device 641 available = false; 642 return 0; 643 } 644 645 // Check if the selected microphone can record stereo 646 // 647 _mixerManager.StereoRecordingIsAvailable(isAvailable); 648 available = isAvailable; 649 650 // Close the initialized input mixer 651 // 652 if (!wasInitialized) { 653 _mixerManager.CloseMicrophone(); 654 } 655 656 return 0; 657 } 658 659 int32_t AudioDeviceMac::SetStereoRecording(bool enable) { 660 if (enable) 661 _recChannels = 2; 662 else 663 _recChannels = 1; 664 665 return 0; 666 } 667 668 int32_t AudioDeviceMac::StereoRecording(bool& enabled) const { 669 if (_recChannels == 2) 670 enabled = true; 671 else 672 enabled = false; 673 674 return 0; 675 } 676 677 int32_t AudioDeviceMac::StereoPlayoutIsAvailable(bool& available) { 678 bool isAvailable(false); 679 bool wasInitialized = _mixerManager.SpeakerIsInitialized(); 680 681 if (!wasInitialized && InitSpeaker() == -1) { 682 // Cannot open the specified device 683 available = false; 684 return 0; 685 } 686 687 // Check if the selected microphone can record stereo 
688 // 689 _mixerManager.StereoPlayoutIsAvailable(isAvailable); 690 available = isAvailable; 691 692 // Close the initialized input mixer 693 // 694 if (!wasInitialized) { 695 _mixerManager.CloseSpeaker(); 696 } 697 698 return 0; 699 } 700 701 int32_t AudioDeviceMac::SetStereoPlayout(bool enable) { 702 if (enable) 703 _playChannels = 2; 704 else 705 _playChannels = 1; 706 707 return 0; 708 } 709 710 int32_t AudioDeviceMac::StereoPlayout(bool& enabled) const { 711 if (_playChannels == 2) 712 enabled = true; 713 else 714 enabled = false; 715 716 return 0; 717 } 718 719 int32_t AudioDeviceMac::MicrophoneVolumeIsAvailable(bool& available) { 720 bool wasInitialized = _mixerManager.MicrophoneIsInitialized(); 721 722 // Make an attempt to open up the 723 // input mixer corresponding to the currently selected output device. 724 // 725 if (!wasInitialized && InitMicrophone() == -1) { 726 // If we end up here it means that the selected microphone has no volume 727 // control. 728 available = false; 729 return 0; 730 } 731 732 // Given that InitMicrophone was successful, we know that a volume control 733 // exists 734 // 735 available = true; 736 737 // Close the initialized input mixer 738 // 739 if (!wasInitialized) { 740 _mixerManager.CloseMicrophone(); 741 } 742 743 return 0; 744 } 745 746 int32_t AudioDeviceMac::SetMicrophoneVolume(uint32_t volume) { 747 return (_mixerManager.SetMicrophoneVolume(volume)); 748 } 749 750 int32_t AudioDeviceMac::MicrophoneVolume(uint32_t& volume) const { 751 uint32_t level(0); 752 753 if (_mixerManager.MicrophoneVolume(level) == -1) { 754 RTC_LOG(LS_WARNING) << "failed to retrieve current microphone level"; 755 return -1; 756 } 757 758 volume = level; 759 return 0; 760 } 761 762 int32_t AudioDeviceMac::MaxMicrophoneVolume(uint32_t& maxVolume) const { 763 uint32_t maxVol(0); 764 765 if (_mixerManager.MaxMicrophoneVolume(maxVol) == -1) { 766 return -1; 767 } 768 769 maxVolume = maxVol; 770 return 0; 771 } 772 773 int32_t 
AudioDeviceMac::MinMicrophoneVolume(uint32_t& minVolume) const { 774 uint32_t minVol(0); 775 776 if (_mixerManager.MinMicrophoneVolume(minVol) == -1) { 777 return -1; 778 } 779 780 minVolume = minVol; 781 return 0; 782 } 783 784 int16_t AudioDeviceMac::PlayoutDevices() { 785 AudioDeviceID playDevices[MaxNumberDevices]; 786 return GetNumberDevices(kAudioDevicePropertyScopeOutput, playDevices, 787 MaxNumberDevices); 788 } 789 790 int32_t AudioDeviceMac::SetPlayoutDevice(uint16_t index) { 791 MutexLock lock(&mutex_); 792 793 if (_playIsInitialized) { 794 return -1; 795 } 796 797 AudioDeviceID playDevices[MaxNumberDevices]; 798 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeOutput, 799 playDevices, MaxNumberDevices); 800 RTC_LOG(LS_VERBOSE) << "number of available waveform-audio output devices is " 801 << nDevices; 802 803 if (index > (nDevices - 1)) { 804 RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1) 805 << "]"; 806 return -1; 807 } 808 809 _outputDeviceIndex = index; 810 _outputDeviceIsSpecified = true; 811 812 return 0; 813 } 814 815 int32_t AudioDeviceMac::SetPlayoutDevice( 816 AudioDeviceModule::WindowsDeviceType /*device*/) { 817 RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported"; 818 return -1; 819 } 820 821 int32_t AudioDeviceMac::PlayoutDeviceName(uint16_t index, 822 char name[kAdmMaxDeviceNameSize], 823 char guid[kAdmMaxGuidSize]) { 824 const uint16_t nDevices(PlayoutDevices()); 825 826 if ((index > (nDevices - 1)) || (name == NULL)) { 827 return -1; 828 } 829 830 memset(name, 0, kAdmMaxDeviceNameSize); 831 832 if (guid != NULL) { 833 memset(guid, 0, kAdmMaxGuidSize); 834 } 835 836 return GetDeviceName(kAudioDevicePropertyScopeOutput, index, 837 ArrayView<char>(name, kAdmMaxDeviceNameSize)); 838 } 839 840 int32_t AudioDeviceMac::RecordingDeviceName(uint16_t index, 841 char name[kAdmMaxDeviceNameSize], 842 char guid[kAdmMaxGuidSize]) { 843 const uint16_t nDevices(RecordingDevices()); 844 845 if ((index > (nDevices 
- 1)) || (name == NULL)) { 846 return -1; 847 } 848 849 memset(name, 0, kAdmMaxDeviceNameSize); 850 851 if (guid != NULL) { 852 memset(guid, 0, kAdmMaxGuidSize); 853 } 854 855 return GetDeviceName(kAudioDevicePropertyScopeInput, index, 856 ArrayView<char>(name, kAdmMaxDeviceNameSize)); 857 } 858 859 int16_t AudioDeviceMac::RecordingDevices() { 860 AudioDeviceID recDevices[MaxNumberDevices]; 861 return GetNumberDevices(kAudioDevicePropertyScopeInput, recDevices, 862 MaxNumberDevices); 863 } 864 865 int32_t AudioDeviceMac::SetRecordingDevice(uint16_t index) { 866 if (_recIsInitialized) { 867 return -1; 868 } 869 870 AudioDeviceID recDevices[MaxNumberDevices]; 871 uint32_t nDevices = GetNumberDevices(kAudioDevicePropertyScopeInput, 872 recDevices, MaxNumberDevices); 873 RTC_LOG(LS_VERBOSE) << "number of available waveform-audio input devices is " 874 << nDevices; 875 876 if (index > (nDevices - 1)) { 877 RTC_LOG(LS_ERROR) << "device index is out of range [0," << (nDevices - 1) 878 << "]"; 879 return -1; 880 } 881 882 _inputDeviceIndex = index; 883 _inputDeviceIsSpecified = true; 884 885 return 0; 886 } 887 888 int32_t AudioDeviceMac::SetRecordingDevice( 889 AudioDeviceModule::WindowsDeviceType /*device*/) { 890 RTC_LOG(LS_ERROR) << "WindowsDeviceType not supported"; 891 return -1; 892 } 893 894 int32_t AudioDeviceMac::PlayoutIsAvailable(bool& available) { 895 available = true; 896 897 // Try to initialize the playout side 898 if (InitPlayout() == -1) { 899 available = false; 900 } 901 902 // We destroy the IOProc created by InitPlayout() in implDeviceIOProc(). 903 // We must actually start playout here in order to have the IOProc 904 // deleted by calling StopPlayout(). 
905 if (StartPlayout() == -1) { 906 available = false; 907 } 908 909 // Cancel effect of initialization 910 if (StopPlayout() == -1) { 911 available = false; 912 } 913 914 return 0; 915 } 916 917 int32_t AudioDeviceMac::RecordingIsAvailable(bool& available) { 918 available = true; 919 920 // Try to initialize the recording side 921 if (InitRecording() == -1) { 922 available = false; 923 } 924 925 // We destroy the IOProc created by InitRecording() in implInDeviceIOProc(). 926 // We must actually start recording here in order to have the IOProc 927 // deleted by calling StopRecording(). 928 if (StartRecording() == -1) { 929 available = false; 930 } 931 932 // Cancel effect of initialization 933 if (StopRecording() == -1) { 934 available = false; 935 } 936 937 return 0; 938 } 939 940 int32_t AudioDeviceMac::InitPlayout() { 941 RTC_LOG(LS_INFO) << "InitPlayout"; 942 MutexLock lock(&mutex_); 943 944 if (_playing) { 945 return -1; 946 } 947 948 if (!_outputDeviceIsSpecified) { 949 return -1; 950 } 951 952 if (_playIsInitialized) { 953 return 0; 954 } 955 956 // Initialize the speaker (devices might have been added or removed) 957 if (InitSpeakerLocked() == -1) { 958 RTC_LOG(LS_WARNING) << "InitSpeaker() failed"; 959 } 960 961 if (!MicrophoneIsInitialized()) { 962 // Make this call to check if we are using 963 // one or two devices (_twoDevices) 964 bool available = false; 965 if (MicrophoneIsAvailableLocked(available) == -1) { 966 RTC_LOG(LS_WARNING) << "MicrophoneIsAvailable() failed"; 967 } 968 } 969 970 PaUtil_FlushRingBuffer(_paRenderBuffer); 971 972 OSStatus err = noErr; 973 UInt32 size = 0; 974 _renderDelayOffsetSamples = 0; 975 _renderDelayUs = 0; 976 _renderLatencyUs = 0; 977 _renderDeviceIsAlive = 1; 978 _doStop = false; 979 980 // The internal microphone of a MacBook Pro is located under the left speaker 981 // grille. When the internal speakers are in use, we want to fully stereo 982 // pan to the right. 
983 AudioObjectPropertyAddress propertyAddress = { 984 kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0}; 985 if (_macBookPro) { 986 _macBookProPanRight = false; 987 Boolean hasProperty = 988 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); 989 if (hasProperty) { 990 UInt32 dataSource = 0; 991 size = sizeof(dataSource); 992 WEBRTC_CA_LOG_WARN(AudioObjectGetPropertyData( 993 _outputDeviceID, &propertyAddress, 0, NULL, &size, &dataSource)); 994 995 if (dataSource == 'ispk') { 996 _macBookProPanRight = true; 997 RTC_LOG(LS_VERBOSE) 998 << "MacBook Pro using internal speakers; stereo panning right"; 999 } else { 1000 RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers"; 1001 } 1002 1003 // Add a listener to determine if the status changes. 1004 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( 1005 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); 1006 } 1007 } 1008 1009 // Get current stream description 1010 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; 1011 memset(&_outStreamFormat, 0, sizeof(_outStreamFormat)); 1012 size = sizeof(_outStreamFormat); 1013 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData( 1014 _outputDeviceID, &propertyAddress, 0, NULL, &size, &_outStreamFormat)); 1015 1016 if (_outStreamFormat.mFormatID != kAudioFormatLinearPCM) { 1017 logCAMsg(LS_ERROR, "Unacceptable output stream format -> mFormatID", 1018 (const char*)&_outStreamFormat.mFormatID); 1019 return -1; 1020 } 1021 1022 if (_outStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) { 1023 RTC_LOG(LS_ERROR) 1024 << "Too many channels on output device (mChannelsPerFrame = " 1025 << _outStreamFormat.mChannelsPerFrame << ")"; 1026 return -1; 1027 } 1028 1029 if (_outStreamFormat.mFormatFlags & kAudioFormatFlagIsNonInterleaved) { 1030 RTC_LOG(LS_ERROR) << "Non-interleaved audio data is not supported." 
1031 "AudioHardware streams should not have this format."; 1032 return -1; 1033 } 1034 1035 RTC_LOG(LS_VERBOSE) << "Ouput stream format:"; 1036 RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _outStreamFormat.mSampleRate 1037 << ", mChannelsPerFrame = " 1038 << _outStreamFormat.mChannelsPerFrame; 1039 RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " 1040 << _outStreamFormat.mBytesPerPacket 1041 << ", mFramesPerPacket = " 1042 << _outStreamFormat.mFramesPerPacket; 1043 RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _outStreamFormat.mBytesPerFrame 1044 << ", mBitsPerChannel = " 1045 << _outStreamFormat.mBitsPerChannel; 1046 RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _outStreamFormat.mFormatFlags; 1047 logCAMsg(LS_VERBOSE, "mFormatID", (const char*)&_outStreamFormat.mFormatID); 1048 1049 // Our preferred format to work with. 1050 if (_outStreamFormat.mChannelsPerFrame < 2) { 1051 // Disable stereo playout when we only have one channel on the device. 1052 _playChannels = 1; 1053 RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device"; 1054 } 1055 WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat()); 1056 1057 // Listen for format changes. 1058 propertyAddress.mSelector = kAudioDevicePropertyStreamFormat; 1059 WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener( 1060 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); 1061 1062 // Listen for processor overloads. 
  // (Tail of InitPlayout, which begins earlier in the file.)
  // Also get notified if the output device overloads its IO cycle.
  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
  WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
      _outputDeviceID, &propertyAddress, &objectListenerProc, this));

  // Create a dedicated render IOProc only if playout does not share the
  // device with an already-initialized recording side (in the shared case
  // the recording side owns the single IOProc).
  if (_twoDevices || !_recIsInitialized) {
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
        _outputDeviceID, deviceIOProc, this, &_deviceIOProcID));
  }

  _playIsInitialized = true;

  return 0;
}

// Prepares the capture side: reads the device's native stream format,
// creates a converter to our desired 16-bit PCM format, configures the
// device buffer size, caches device+stream latency, installs property
// listeners and (if needed) the capture IOProc.
// Returns 0 on success, -1 on failure. Must not be called while recording.
int32_t AudioDeviceMac::InitRecording() {
  RTC_LOG(LS_INFO) << "InitRecording";
  MutexLock lock(&mutex_);

  if (_recording) {
    return -1;
  }

  if (!_inputDeviceIsSpecified) {
    return -1;
  }

  // Already initialized; nothing to do.
  if (_recIsInitialized) {
    return 0;
  }

  // Initialize the microphone (devices might have been added or removed)
  if (InitMicrophoneLocked() == -1) {
    RTC_LOG(LS_WARNING) << "InitMicrophone() failed";
  }

  if (!SpeakerIsInitialized()) {
    // Make this call to check if we are using
    // one or two devices (_twoDevices)
    bool available = false;
    if (SpeakerIsAvailableLocked(available) == -1) {
      RTC_LOG(LS_WARNING) << "SpeakerIsAvailable() failed";
    }
  }

  OSStatus err = noErr;
  UInt32 size = 0;

  // Discard any stale captured audio from a previous session.
  PaUtil_FlushRingBuffer(_paCaptureBuffer);

  _captureDelayUs = 0;
  _captureLatencyUs = 0;
  _captureDeviceIsAlive = 1;
  _doStopRec = false;

  // Get current stream description
  AudioObjectPropertyAddress propertyAddress = {
      kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
  memset(&_inStreamFormat, 0, sizeof(_inStreamFormat));
  size = sizeof(_inStreamFormat);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &_inStreamFormat));

  if (_inStreamFormat.mFormatID != kAudioFormatLinearPCM) {
    logCAMsg(LS_ERROR, "Unacceptable input stream format -> mFormatID",
             (const char*)&_inStreamFormat.mFormatID);
    return -1;
  }

  if (_inStreamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
    RTC_LOG(LS_ERROR)
        << "Too many channels on input device (mChannelsPerFrame = "
        << _inStreamFormat.mChannelsPerFrame << ")";
    return -1;
  }

  // One IO block is N_BLOCKS_IO * 10 ms of device-rate audio; it must fit in
  // the PortAudio ring buffer that the capture worker drains.
  const int io_block_size_samples = _inStreamFormat.mChannelsPerFrame *
                                    _inStreamFormat.mSampleRate / 100 *
                                    N_BLOCKS_IO;
  if (io_block_size_samples > _captureBufSizeSamples) {
    RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
                      << ") is larger than ring buffer ("
                      << _captureBufSizeSamples << ")";
    return -1;
  }

  RTC_LOG(LS_VERBOSE) << "Input stream format:";
  RTC_LOG(LS_VERBOSE) << "mSampleRate = " << _inStreamFormat.mSampleRate
                      << ", mChannelsPerFrame = "
                      << _inStreamFormat.mChannelsPerFrame;
  RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << _inStreamFormat.mBytesPerPacket
                      << ", mFramesPerPacket = "
                      << _inStreamFormat.mFramesPerPacket;
  RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << _inStreamFormat.mBytesPerFrame
                      << ", mBitsPerChannel = "
                      << _inStreamFormat.mBitsPerChannel;
  RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << _inStreamFormat.mFormatFlags;
  logCAMsg(LS_VERBOSE, "mFormatID", (const char*)&_inStreamFormat.mFormatID);

  // Our preferred format to work with
  if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
    _inDesiredFormat.mChannelsPerFrame = 2;
  } else {
    // Disable stereo recording when we only have one channel on the device.
    _inDesiredFormat.mChannelsPerFrame = 1;
    _recChannels = 1;
    RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
  }

  if (_ptrAudioBuffer) {
    // Update audio buffer with the selected parameters
    _ptrAudioBuffer->SetRecordingSampleRate(N_REC_SAMPLES_PER_SEC);
    _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
  }

  // Desired capture format: packed signed 16-bit PCM, one frame per packet.
  _inDesiredFormat.mSampleRate = N_REC_SAMPLES_PER_SEC;
  _inDesiredFormat.mBytesPerPacket =
      _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  _inDesiredFormat.mFramesPerPacket = 1;
  _inDesiredFormat.mBytesPerFrame =
      _inDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  _inDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;

  _inDesiredFormat.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
  _inDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
  _inDesiredFormat.mFormatID = kAudioFormatLinearPCM;

  // Converter from the device's native format to our desired format.
  WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&_inStreamFormat, &_inDesiredFormat,
                                            &_captureConverter));

  // First try to set buffer size to desired value (10 ms * N_BLOCKS_IO)
  // TODO(xians): investigate this block.
  UInt32 bufByteCount =
      (UInt32)((_inStreamFormat.mSampleRate / 1000.0) * 10.0 * N_BLOCKS_IO *
               _inStreamFormat.mChannelsPerFrame * sizeof(Float32));
  if (_inStreamFormat.mFramesPerPacket != 0) {
    // Round the byte count up to a whole number of packets.
    if (bufByteCount % _inStreamFormat.mFramesPerPacket != 0) {
      bufByteCount =
          ((UInt32)(bufByteCount / _inStreamFormat.mFramesPerPacket) + 1) *
          _inStreamFormat.mFramesPerPacket;
    }
  }

  // Ensure the buffer size is within the acceptable range provided by the
  // device.
  propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
  AudioValueRange range;
  size = sizeof(range);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &range));
  if (range.mMinimum > bufByteCount) {
    bufByteCount = range.mMinimum;
  } else if (range.mMaximum < bufByteCount) {
    bufByteCount = range.mMaximum;
  }

  propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
  size = sizeof(bufByteCount);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));

  // Get capture device latency
  propertyAddress.mSelector = kAudioDevicePropertyLatency;
  UInt32 latency = 0;
  size = sizeof(UInt32);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  // Convert from frames at the device rate to microseconds.
  _captureLatencyUs = (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);

  // Get capture stream latency
  propertyAddress.mSelector = kAudioDevicePropertyStreams;
  AudioStreamID stream = 0;
  size = sizeof(AudioStreamID);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
  propertyAddress.mSelector = kAudioStreamPropertyLatency;
  size = sizeof(UInt32);
  latency = 0;
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _inputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  _captureLatencyUs +=
      (UInt32)((1.0e6 * latency) / _inStreamFormat.mSampleRate);

  // Listen for format changes
  // TODO(xians): should we be using kAudioDevicePropertyDeviceHasChanged?
  propertyAddress.mSelector = kAudioDevicePropertyStreamFormat;
  WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
      _inputDeviceID, &propertyAddress, &objectListenerProc, this));

  // Listen for processor overloads
  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
  WEBRTC_CA_LOG_WARN(AudioObjectAddPropertyListener(
      _inputDeviceID, &propertyAddress, &objectListenerProc, this));

  // Dedicated input device gets its own IOProc; a shared device gets the
  // single combined IOProc unless playout already created it.
  if (_twoDevices) {
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
        _inputDeviceID, inDeviceIOProc, this, &_inDeviceIOProcID));
  } else if (!_playIsInitialized) {
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceCreateIOProcID(
        _inputDeviceID, deviceIOProc, this, &_deviceIOProcID));
  }

  // Mark recording side as initialized
  _recIsInitialized = true;

  return 0;
}

// Starts the capture worker thread and the capture IOProc (dedicated, or the
// shared IOProc if it is not already running for playout).
// Requires a prior successful InitRecording(); returns 0 on success.
int32_t AudioDeviceMac::StartRecording() {
  RTC_LOG(LS_INFO) << "StartRecording";
  MutexLock lock(&mutex_);

  if (!_recIsInitialized) {
    return -1;
  }

  if (_recording) {
    return 0;
  }

  if (!_initialized) {
    RTC_LOG(LS_ERROR) << "Recording worker thread has not been started";
    return -1;
  }

  RTC_DCHECK(capture_worker_thread_.empty());
  // Real-time thread that drains the capture ring buffer.
  capture_worker_thread_ = PlatformThread::SpawnJoinable(
      [this] {
        while (CaptureWorkerThread()) {
        }
      },
      "CaptureWorkerThread",
      ThreadAttributes().SetPriority(ThreadPriority::kRealtime));

  OSStatus err = noErr;
  if (_twoDevices) {
    WEBRTC_CA_RETURN_ON_ERR(
        AudioDeviceStart(_inputDeviceID, _inDeviceIOProcID));
  } else if (!_playing) {
    // Shared device: only start the IOProc if playout hasn't already.
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_inputDeviceID, _deviceIOProcID));
  }

  _recording = true;

  return 0;
}

// Stops recording: signals the IOProc to stop itself, joins the capture
// worker thread, disposes the converter and removes property listeners.
// Returns 0 (also when recording was never initialized).
int32_t AudioDeviceMac::StopRecording() {
  RTC_LOG(LS_INFO) << "StopRecording";
  MutexLock lock(&mutex_);

  if (!_recIsInitialized) {
    return 0;
  }

  OSStatus
  err = noErr;
  int32_t captureDeviceIsAlive = _captureDeviceIsAlive;
  if (_twoDevices && captureDeviceIsAlive == 1) {
    // Recording side uses its own dedicated device and IOProc.
    if (_recording) {
      _recording = false;
      _doStopRec = true;  // Signal to io proc to stop audio device
      mutex_.Unlock();    // Cannot be under lock, risk of deadlock
      // The IOProc observes _doStopRec, stops the device, and signals
      // _stopEventRec. On timeout we stop/destroy the IOProc ourselves.
      if (!_stopEventRec.Wait(TimeDelta::Seconds(2))) {
        MutexLock lockScoped(&mutex_);
        RTC_LOG(LS_WARNING) << "Timed out stopping the capture IOProc."
                               "We may have failed to detect a device removal.";
        WEBRTC_CA_LOG_WARN(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
        WEBRTC_CA_LOG_WARN(
            AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
      }
      mutex_.Lock();
      _doStopRec = false;
      RTC_LOG(LS_INFO) << "Recording stopped (input device)";
    } else if (_recIsInitialized) {
      // Initialized but never started: just tear down the IOProc.
      WEBRTC_CA_LOG_WARN(
          AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
      RTC_LOG(LS_INFO) << "Recording uninitialized (input device)";
    }
  } else {
    // We signal a stop for a shared device even when rendering has
    // not yet ended. This is to ensure the IOProc will return early as
    // intended (by checking `_recording`) before accessing
    // resources we free below (e.g. the capture converter).
    //
    // In the case of a shared device, the IOProc will verify
    // rendering has ended before stopping itself.
    if (_recording && captureDeviceIsAlive == 1) {
      _recording = false;
      _doStop = true;   // Signal to io proc to stop audio device
      mutex_.Unlock();  // Cannot be under lock, risk of deadlock
      if (!_stopEvent.Wait(TimeDelta::Seconds(2))) {
        MutexLock lockScoped(&mutex_);
        RTC_LOG(LS_WARNING) << "Timed out stopping the shared IOProc."
                               "We may have failed to detect a device removal.";
        // We assume rendering on a shared device has stopped as well if
        // the IOProc times out.
        WEBRTC_CA_LOG_WARN(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
        WEBRTC_CA_LOG_WARN(
            AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
      }
      mutex_.Lock();
      _doStop = false;
      RTC_LOG(LS_INFO) << "Recording stopped (shared device)";
    } else if (_recIsInitialized && !_playing && !_playIsInitialized) {
      // Shared IOProc is only destroyed once playout no longer needs it.
      WEBRTC_CA_LOG_WARN(
          AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
      RTC_LOG(LS_INFO) << "Recording uninitialized (shared device)";
    }
  }

  // Setting this signal will allow the worker thread to be stopped.
  _captureDeviceIsAlive = 0;

  if (!capture_worker_thread_.empty()) {
    // Join outside the lock: the worker may need the lock to exit.
    mutex_.Unlock();
    capture_worker_thread_.Finalize();
    mutex_.Lock();
  }

  WEBRTC_CA_LOG_WARN(AudioConverterDispose(_captureConverter));

  // Remove listeners.
  AudioObjectPropertyAddress propertyAddress = {
      kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeInput, 0};
  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
      _inputDeviceID, &propertyAddress, &objectListenerProc, this));

  propertyAddress.mSelector = kAudioDeviceProcessorOverload;
  WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener(
      _inputDeviceID, &propertyAddress, &objectListenerProc, this));

  _recIsInitialized = false;
  _recording = false;

  return 0;
}

// True once InitRecording() has completed successfully.
bool AudioDeviceMac::RecordingIsInitialized() const {
  return (_recIsInitialized);
}

// True while capture is running (between StartRecording and StopRecording).
bool AudioDeviceMac::Recording() const {
  return (_recording);
}

// True once InitPlayout() has completed successfully.
bool AudioDeviceMac::PlayoutIsInitialized() const {
  return (_playIsInitialized);
}

// Starts the render worker thread and (unless a shared device is already
// running for capture) the render IOProc.
// Requires a prior successful InitPlayout(); returns 0 on success.
int32_t AudioDeviceMac::StartPlayout() {
  RTC_LOG(LS_INFO) << "StartPlayout";
  MutexLock lock(&mutex_);

  if (!_playIsInitialized) {
    return -1;
  }

  if (_playing) {
    return 0;
  }

  RTC_DCHECK(render_worker_thread_.empty());
  // Real-time thread that feeds the render ring buffer.
  render_worker_thread_ = PlatformThread::SpawnJoinable(
      [this] {
        while (RenderWorkerThread()) {
        }
      },
      "RenderWorkerThread",
      ThreadAttributes().SetPriority(ThreadPriority::kRealtime));

  // Shared device: if capture already started the IOProc, don't start again.
  if (_twoDevices || !_recording) {
    OSStatus err = noErr;
    WEBRTC_CA_RETURN_ON_ERR(AudioDeviceStart(_outputDeviceID, _deviceIOProcID));
  }
  _playing = true;

  return 0;
}

// Stops playout: signals the render IOProc to stop itself, joins the render
// worker thread, disposes the render converter and removes listeners.
// Returns 0 (also when playout was never initialized).
int32_t AudioDeviceMac::StopPlayout() {
  RTC_LOG(LS_INFO) << "StopPlayout";
  MutexLock lock(&mutex_);

  if (!_playIsInitialized) {
    return 0;
  }

  OSStatus err = noErr;
  int32_t renderDeviceIsAlive = _renderDeviceIsAlive;
  if (_playing && renderDeviceIsAlive == 1) {
    // We signal a stop for a shared device even when capturing has not
    // yet ended. This is to ensure the IOProc will return early as
    // intended (by checking `_playing`) before accessing resources we
    // free below (e.g. the render converter).
    //
    // In the case of a shared device, the IOProc will verify capturing
    // has ended before stopping itself.
    _playing = false;
    _doStop = true;   // Signal to io proc to stop audio device
    mutex_.Unlock();  // Cannot be under lock, risk of deadlock
    // The IOProc observes _doStop, stops the device, and signals
    // _stopEvent. On timeout we stop/destroy the IOProc ourselves.
    if (!_stopEvent.Wait(TimeDelta::Seconds(2))) {
      MutexLock lockScoped(&mutex_);
      RTC_LOG(LS_WARNING) << "Timed out stopping the render IOProc."
                             "We may have failed to detect a device removal.";

      // We assume capturing on a shared device has stopped as well if the
      // IOProc times out.
1501 AudioObjectPropertyAddress propertyAddress = { 1502 kAudioDevicePropertyStreamFormat, kAudioDevicePropertyScopeOutput, 0}; 1503 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( 1504 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); 1505 1506 propertyAddress.mSelector = kAudioDeviceProcessorOverload; 1507 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( 1508 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); 1509 1510 if (_macBookPro) { 1511 Boolean hasProperty = 1512 AudioObjectHasProperty(_outputDeviceID, &propertyAddress); 1513 if (hasProperty) { 1514 propertyAddress.mSelector = kAudioDevicePropertyDataSource; 1515 WEBRTC_CA_LOG_WARN(AudioObjectRemovePropertyListener( 1516 _outputDeviceID, &propertyAddress, &objectListenerProc, this)); 1517 } 1518 } 1519 1520 _playIsInitialized = false; 1521 _playing = false; 1522 1523 return 0; 1524 } 1525 1526 int32_t AudioDeviceMac::PlayoutDelay(uint16_t& delayMS) const { 1527 int32_t renderDelayUs = _renderDelayUs; 1528 delayMS = 1529 static_cast<uint16_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5); 1530 return 0; 1531 } 1532 1533 bool AudioDeviceMac::Playing() const { 1534 return (_playing); 1535 } 1536 1537 // ============================================================================ 1538 // Private Methods 1539 // ============================================================================ 1540 1541 int32_t AudioDeviceMac::GetNumberDevices(const AudioObjectPropertyScope scope, 1542 AudioDeviceID scopedDeviceIds[], 1543 const uint32_t deviceListLength) { 1544 OSStatus err = noErr; 1545 1546 AudioObjectPropertyAddress propertyAddress = { 1547 kAudioHardwarePropertyDevices, kAudioObjectPropertyScopeGlobal, 1548 kAudioObjectPropertyElementMain}; 1549 UInt32 size = 0; 1550 WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyDataSize( 1551 kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size)); 1552 if (size == 0) { 1553 RTC_LOG(LS_WARNING) << "No devices"; 1554 return 0; 1555 } 

  UInt32 numberDevices = size / sizeof(AudioDeviceID);
  const auto deviceIds = std::make_unique<AudioDeviceID[]>(numberDevices);
  AudioBufferList* bufferList = NULL;
  UInt32 numberScopedDevices = 0;

  // First check if there is a default device and list it
  UInt32 hardwareProperty = 0;
  if (scope == kAudioDevicePropertyScopeOutput) {
    hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
  } else {
    hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
  }

  AudioObjectPropertyAddress propertyAddressDefault = {
      hardwareProperty, kAudioObjectPropertyScopeGlobal,
      kAudioObjectPropertyElementMain};

  AudioDeviceID usedID;
  UInt32 uintSize = sizeof(UInt32);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                                     &propertyAddressDefault, 0,
                                                     NULL, &uintSize, &usedID));
  if (usedID != kAudioDeviceUnknown) {
    // The default device occupies slot 0 of the output list.
    scopedDeviceIds[numberScopedDevices] = usedID;
    numberScopedDevices++;
  } else {
    RTC_LOG(LS_WARNING) << "GetNumberDevices(): Default device unknown";
  }

  // Then list the rest of the devices
  bool listOK = true;

  WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(kAudioObjectSystemObject,
                                               &propertyAddress, 0, NULL, &size,
                                               deviceIds.get()));
  if (err != noErr) {
    listOK = false;
  } else {
    // Keep only devices that have at least one stream buffer in `scope`.
    propertyAddress.mSelector = kAudioDevicePropertyStreamConfiguration;
    propertyAddress.mScope = scope;
    propertyAddress.mElement = 0;
    for (UInt32 i = 0; i < numberDevices; i++) {
      // Check for input channels
      WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyDataSize(
          deviceIds[i], &propertyAddress, 0, NULL, &size));
      if (err == kAudioHardwareBadDeviceError) {
        // This device doesn't actually exist; continue iterating.
        continue;
      } else if (err != noErr) {
        listOK = false;
        break;
      }

      bufferList = (AudioBufferList*)malloc(size);
      WEBRTC_CA_LOG_ERR(AudioObjectGetPropertyData(
          deviceIds[i], &propertyAddress, 0, NULL, &size, bufferList));
      if (err != noErr) {
        listOK = false;
        break;  // bufferList freed in the !listOK cleanup below.
      }

      if (bufferList->mNumberBuffers > 0) {
        if (numberScopedDevices >= deviceListLength) {
          RTC_LOG(LS_ERROR) << "Device list is not long enough";
          listOK = false;
          break;
        }

        scopedDeviceIds[numberScopedDevices] = deviceIds[i];
        numberScopedDevices++;
      }

      free(bufferList);
      bufferList = NULL;
    }  // for
  }

  if (!listOK) {
    if (bufferList) {
      free(bufferList);
      bufferList = NULL;
    }
    return -1;
  }

  return numberScopedDevices;
}

// Writes the human-readable name of the device at `index` (in the `scope`d
// device list) into `name`. Index 0 resolves to the default device and is
// rendered as "default (<name>)". Returns 0 on success, -1 on error.
int32_t AudioDeviceMac::GetDeviceName(const AudioObjectPropertyScope scope,
                                      const uint16_t index,
                                      ArrayView<char> name) {
  OSStatus err = noErr;
  AudioDeviceID deviceIds[MaxNumberDevices];

  int numberDevices = GetNumberDevices(scope, deviceIds, MaxNumberDevices);
  if (numberDevices < 0) {
    return -1;
  } else if (numberDevices == 0) {
    RTC_LOG(LS_ERROR) << "No devices";
    return -1;
  }

  // If the number is below the number of devices, assume it's "WEBRTC ID"
  // otherwise assume it's a CoreAudio ID
  AudioDeviceID usedID;

  // Check if there is a default device
  bool isDefaultDevice = false;
  if (index == 0) {
    UInt32 hardwareProperty = 0;
    if (scope == kAudioDevicePropertyScopeOutput) {
      hardwareProperty = kAudioHardwarePropertyDefaultOutputDevice;
    } else {
      hardwareProperty = kAudioHardwarePropertyDefaultInputDevice;
    }
    AudioObjectPropertyAddress propertyAddress = {
        hardwareProperty, kAudioObjectPropertyScopeGlobal,
        kAudioObjectPropertyElementMain};
    UInt32 size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &usedID));
    if (usedID == kAudioDeviceUnknown) {
      RTC_LOG(LS_WARNING) << "GetDeviceName(): Default device unknown";
    } else {
      isDefaultDevice = true;
    }
  }

  AudioObjectPropertyAddress propertyAddress = {kAudioDevicePropertyDeviceName,
                                                scope, 0};

  if (isDefaultDevice) {
    std::array<char, kAdmMaxDeviceNameSize> devName;
    UInt32 len = devName.size();

    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        usedID, &propertyAddress, 0, NULL, &len, devName.data()));

    SimpleStringBuilder ss(name);
    ss.AppendFormat("default (%s)", devName.data());
  } else {
    // Small indices are positions in our device list; larger values are
    // treated as raw CoreAudio device IDs.
    if (index < numberDevices) {
      usedID = deviceIds[index];
    } else {
      usedID = index;
    }
    UInt32 len = name.size();

    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        usedID, &propertyAddress, 0, NULL, &len, name.data()));
  }

  return 0;
}

// Resolves `userDeviceIndex` to a CoreAudio device ID for input or output
// (index 0 means the system default) and logs its name/manufacturer, which
// also validates the ID. Returns 0 on success, -1 on error.
int32_t AudioDeviceMac::InitDevice(const uint16_t userDeviceIndex,
                                   AudioDeviceID& deviceId,
                                   const bool isInput) {
  OSStatus err = noErr;
  UInt32 size = 0;
  AudioObjectPropertyScope deviceScope;
  AudioObjectPropertySelector defaultDeviceSelector;
  AudioDeviceID deviceIds[MaxNumberDevices];

  if (isInput) {
    deviceScope = kAudioDevicePropertyScopeInput;
    defaultDeviceSelector = kAudioHardwarePropertyDefaultInputDevice;
  } else {
    deviceScope = kAudioDevicePropertyScopeOutput;
    defaultDeviceSelector = kAudioHardwarePropertyDefaultOutputDevice;
  }

  AudioObjectPropertyAddress propertyAddress = {
      defaultDeviceSelector, kAudioObjectPropertyScopeGlobal,
      kAudioObjectPropertyElementMain};

  // Get the actual device IDs
  int numberDevices =
      GetNumberDevices(deviceScope, deviceIds, MaxNumberDevices);
  if (numberDevices < 0) {
    return -1;
  } else if (numberDevices == 0) {
    RTC_LOG(LS_ERROR) << "InitDevice(): No devices";
    return -1;
  }

  bool isDefaultDevice = false;
  deviceId = kAudioDeviceUnknown;
  if (userDeviceIndex == 0) {
    // Try to use default system device
    size = sizeof(AudioDeviceID);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        kAudioObjectSystemObject, &propertyAddress, 0, NULL, &size, &deviceId));
    if (deviceId == kAudioDeviceUnknown) {
      RTC_LOG(LS_WARNING) << "No default device exists";
    } else {
      isDefaultDevice = true;
    }
  }

  if (!isDefaultDevice) {
    deviceId = deviceIds[userDeviceIndex];
  }

  // Obtain device name and manufacturer for logging.
  // Also use this as a test to ensure a user-set device ID is valid.
  char devName[128];
  char devManf[128];
  memset(devName, 0, sizeof(devName));
  memset(devManf, 0, sizeof(devManf));

  propertyAddress.mSelector = kAudioDevicePropertyDeviceName;
  propertyAddress.mScope = deviceScope;
  propertyAddress.mElement = 0;
  size = sizeof(devName);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
                                                     0, NULL, &size, devName));

  propertyAddress.mSelector = kAudioDevicePropertyDeviceManufacturer;
  size = sizeof(devManf);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(deviceId, &propertyAddress,
                                                     0, NULL, &size, devManf));

  if (isInput) {
    RTC_LOG(LS_INFO) << "Input device: " << devManf << " " << devName;
  } else {
    RTC_LOG(LS_INFO) << "Output device: " << devManf << " " << devName;
  }

  return 0;
}

// Configures the desired 16-bit PCM playout format, creates the render
// converter, sets the device buffer size, and caches output latency.
// Returns noErr (0) on success or a CoreAudio error code.
OSStatus AudioDeviceMac::SetDesiredPlayoutFormat() {
  // Our preferred format to work with.
  _outDesiredFormat.mSampleRate = N_PLAY_SAMPLES_PER_SEC;
  _outDesiredFormat.mChannelsPerFrame = _playChannels;

  if (_ptrAudioBuffer) {
    // Update audio buffer with the selected parameters.
    _ptrAudioBuffer->SetPlayoutSampleRate(N_PLAY_SAMPLES_PER_SEC);
    _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
  }

  // Samples of slack in the render ring buffer beyond the engine's
  // N_BUFFERS_OUT playout buffers; used as a delay offset.
  _renderDelayOffsetSamples =
      _renderBufSizeSamples - N_BUFFERS_OUT * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES *
                                  _outDesiredFormat.mChannelsPerFrame;

  // Desired render format: packed signed 16-bit PCM.
  _outDesiredFormat.mBytesPerPacket =
      _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  // In uncompressed audio, a packet is one frame.
  _outDesiredFormat.mFramesPerPacket = 1;
  _outDesiredFormat.mBytesPerFrame =
      _outDesiredFormat.mChannelsPerFrame * sizeof(SInt16);
  _outDesiredFormat.mBitsPerChannel = sizeof(SInt16) * 8;

  _outDesiredFormat.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
#ifdef WEBRTC_ARCH_BIG_ENDIAN
  _outDesiredFormat.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
#endif
  _outDesiredFormat.mFormatID = kAudioFormatLinearPCM;

  // Converter from our desired format to the device's native stream format.
  OSStatus err = noErr;
  WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(
      &_outDesiredFormat, &_outStreamFormat, &_renderConverter));

  // Try to set buffer size to desired value set to 20ms.
  const uint16_t kPlayBufDelayFixed = 20;
  UInt32 bufByteCount = static_cast<UInt32>(
      (_outStreamFormat.mSampleRate / 1000.0) * kPlayBufDelayFixed *
      _outStreamFormat.mChannelsPerFrame * sizeof(Float32));
  if (_outStreamFormat.mFramesPerPacket != 0) {
    // Round the byte count up to a whole number of packets.
    if (bufByteCount % _outStreamFormat.mFramesPerPacket != 0) {
      bufByteCount = (static_cast<UInt32>(bufByteCount /
                                          _outStreamFormat.mFramesPerPacket) +
                      1) *
                     _outStreamFormat.mFramesPerPacket;
    }
  }

  // Ensure the buffer size is within the range provided by the device.
  // NOTE: the initial selector value is immediately overwritten below; only
  // the scope (output) of this address is significant.
  AudioObjectPropertyAddress propertyAddress = {
      kAudioDevicePropertyDataSource, kAudioDevicePropertyScopeOutput, 0};
  propertyAddress.mSelector = kAudioDevicePropertyBufferSizeRange;
  AudioValueRange range;
  UInt32 size = sizeof(range);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &range));
  if (range.mMinimum > bufByteCount) {
    bufByteCount = range.mMinimum;
  } else if (range.mMaximum < bufByteCount) {
    bufByteCount = range.mMaximum;
  }

  propertyAddress.mSelector = kAudioDevicePropertyBufferSize;
  size = sizeof(bufByteCount);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectSetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, size, &bufByteCount));

  // Get render device latency.
  propertyAddress.mSelector = kAudioDevicePropertyLatency;
  UInt32 latency = 0;
  size = sizeof(UInt32);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  // Convert from frames at the device rate to microseconds.
  _renderLatencyUs =
      static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);

  // Get render stream latency.
  propertyAddress.mSelector = kAudioDevicePropertyStreams;
  AudioStreamID stream = 0;
  size = sizeof(AudioStreamID);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &stream));
  propertyAddress.mSelector = kAudioStreamPropertyLatency;
  size = sizeof(UInt32);
  latency = 0;
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      _outputDeviceID, &propertyAddress, 0, NULL, &size, &latency));
  _renderLatencyUs +=
      static_cast<uint32_t>((1.0e6 * latency) / _outStreamFormat.mSampleRate);

  RTC_LOG(LS_VERBOSE) << "initial playout status: _renderDelayOffsetSamples="
                      << _renderDelayOffsetSamples
                      << ", _renderDelayUs=" << _renderDelayUs
                      << ", _renderLatencyUs=" << _renderLatencyUs;
  return 0;
}

// Static trampoline for CoreAudio property notifications; forwards to the
// member implementation through `clientData`.
OSStatus AudioDeviceMac::objectListenerProc(
    AudioObjectID objectId,
    UInt32 numberAddresses,
    const AudioObjectPropertyAddress addresses[],
    void* clientData) {
  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
  RTC_DCHECK(ptrThis != NULL);

  ptrThis->implObjectListenerProc(objectId, numberAddresses, addresses);

  // AudioObjectPropertyListenerProc functions are supposed to return 0
  return 0;
}

// Dispatches each notified property change to its dedicated handler.
OSStatus AudioDeviceMac::implObjectListenerProc(
    const AudioObjectID objectId,
    const UInt32 numberAddresses,
    const AudioObjectPropertyAddress addresses[]) {
  RTC_LOG(LS_VERBOSE) << "AudioDeviceMac::implObjectListenerProc()";

  for (UInt32 i = 0; i < numberAddresses; i++) {
    if (addresses[i].mSelector == kAudioHardwarePropertyDevices) {
      HandleDeviceChange();
    } else if (addresses[i].mSelector == kAudioDevicePropertyStreamFormat) {
      HandleStreamFormatChange(objectId, addresses[i]);
    } else if (addresses[i].mSelector == kAudioDevicePropertyDataSource) {
      HandleDataSourceChange(objectId, addresses[i]);
    } else if (addresses[i].mSelector
               == kAudioDeviceProcessorOverload) {
      HandleProcessorOverload(addresses[i]);
    }
  }

  return 0;
}

// Called when the system device list changes; checks whether our registered
// input/output devices are still alive and marks them dead if removed.
int32_t AudioDeviceMac::HandleDeviceChange() {
  OSStatus err = noErr;

  RTC_LOG(LS_VERBOSE) << "kAudioHardwarePropertyDevices";

  // A device has changed. Check if our registered devices have been removed.
  // Ensure the devices have been initialized, meaning the IDs are valid.
  if (MicrophoneIsInitialized()) {
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeInput, 0};
    UInt32 deviceIsAlive = 1;
    UInt32 size = sizeof(UInt32);
    err = AudioObjectGetPropertyData(_inputDeviceID, &propertyAddress, 0, NULL,
                                     &size, &deviceIsAlive);

    if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
      RTC_LOG(LS_WARNING) << "Capture device is not alive (probably removed)";
      _captureDeviceIsAlive = 0;
      _mixerManager.CloseMicrophone();
    } else if (err != noErr) {
      logCAMsg(LS_ERROR, "Error in AudioDeviceGetProperty()",
               (const char*)&err);
      return -1;
    }
  }

  if (SpeakerIsInitialized()) {
    AudioObjectPropertyAddress propertyAddress = {
        kAudioDevicePropertyDeviceIsAlive, kAudioDevicePropertyScopeOutput, 0};
    UInt32 deviceIsAlive = 1;
    UInt32 size = sizeof(UInt32);
    err = AudioObjectGetPropertyData(_outputDeviceID, &propertyAddress, 0, NULL,
                                     &size, &deviceIsAlive);

    if (err == kAudioHardwareBadDeviceError || deviceIsAlive == 0) {
      RTC_LOG(LS_WARNING) << "Render device is not alive (probably removed)";
      _renderDeviceIsAlive = 0;
      _mixerManager.CloseSpeaker();
    } else if (err != noErr) {
      logCAMsg(LS_ERROR, "Error in AudioDeviceGetProperty()",
               (const char*)&err);
      return -1;
    }
  }

  return 0;
}

// Called when a device's stream format changes; re-reads the format,
// validates it, and recreates the affected converter (capture or render).
int32_t AudioDeviceMac::HandleStreamFormatChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress) {
  OSStatus err = noErr;

  RTC_LOG(LS_VERBOSE) << "Stream format changed";

  // Ignore notifications for devices we are not using.
  if (objectId != _inputDeviceID && objectId != _outputDeviceID) {
    return 0;
  }

  // Get the new device format
  AudioStreamBasicDescription streamFormat;
  UInt32 size = sizeof(streamFormat);
  WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
      objectId, &propertyAddress, 0, NULL, &size, &streamFormat));

  if (streamFormat.mFormatID != kAudioFormatLinearPCM) {
    logCAMsg(LS_ERROR, "Unacceptable input stream format -> mFormatID",
             (const char*)&streamFormat.mFormatID);
    return -1;
  }

  if (streamFormat.mChannelsPerFrame > N_DEVICE_CHANNELS) {
    RTC_LOG(LS_ERROR) << "Too many channels on device (mChannelsPerFrame = "
                      << streamFormat.mChannelsPerFrame << ")";
    return -1;
  }

  if (_ptrAudioBuffer && streamFormat.mChannelsPerFrame != _recChannels) {
    RTC_LOG(LS_ERROR) << "Changing channels not supported (mChannelsPerFrame = "
                      << streamFormat.mChannelsPerFrame << ")";
    return -1;
  }

  RTC_LOG(LS_VERBOSE) << "Stream format:";
  RTC_LOG(LS_VERBOSE) << "mSampleRate = " << streamFormat.mSampleRate
                      << ", mChannelsPerFrame = "
                      << streamFormat.mChannelsPerFrame;
  RTC_LOG(LS_VERBOSE) << "mBytesPerPacket = " << streamFormat.mBytesPerPacket
                      << ", mFramesPerPacket = "
                      << streamFormat.mFramesPerPacket;
  RTC_LOG(LS_VERBOSE) << "mBytesPerFrame = " << streamFormat.mBytesPerFrame
                      << ", mBitsPerChannel = " << streamFormat.mBitsPerChannel;
  RTC_LOG(LS_VERBOSE) << "mFormatFlags = " << streamFormat.mFormatFlags;
  logCAMsg(LS_VERBOSE, "mFormatID", (const char*)&streamFormat.mFormatID);

  if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
    // One IO block at the new rate must still fit in the capture ring buffer.
    const int io_block_size_samples = streamFormat.mChannelsPerFrame *
                                      streamFormat.mSampleRate / 100 *
                                      N_BLOCKS_IO;
    if (io_block_size_samples > _captureBufSizeSamples) {
      RTC_LOG(LS_ERROR) << "Input IO block size (" << io_block_size_samples
                        << ") is larger than ring buffer ("
                        << _captureBufSizeSamples << ")";
      return -1;
    }

    memcpy(&_inStreamFormat, &streamFormat, sizeof(streamFormat));

    if (_inStreamFormat.mChannelsPerFrame >= 2 && (_recChannels == 2)) {
      _inDesiredFormat.mChannelsPerFrame = 2;
    } else {
      // Disable stereo recording when we only have one channel on the device.
      _inDesiredFormat.mChannelsPerFrame = 1;
      _recChannels = 1;
      RTC_LOG(LS_VERBOSE) << "Stereo recording unavailable on this device";
    }

    // Recreate the converter with the new format
    // TODO(xians): make this thread safe
    WEBRTC_CA_RETURN_ON_ERR(AudioConverterDispose(_captureConverter));

    WEBRTC_CA_RETURN_ON_ERR(AudioConverterNew(&streamFormat, &_inDesiredFormat,
                                              &_captureConverter));
  } else {
    memcpy(&_outStreamFormat, &streamFormat, sizeof(streamFormat));

    // Our preferred format to work with
    if (_outStreamFormat.mChannelsPerFrame < 2) {
      _playChannels = 1;
      RTC_LOG(LS_VERBOSE) << "Stereo playout unavailable on this device";
    }
    WEBRTC_CA_RETURN_ON_ERR(SetDesiredPlayoutFormat());
  }
  return 0;
}

// Called when the output data source changes on a MacBook Pro; enables
// stereo right-panning when audio goes to the internal speakers ('ispk').
int32_t AudioDeviceMac::HandleDataSourceChange(
    const AudioObjectID objectId,
    const AudioObjectPropertyAddress propertyAddress) {
  OSStatus err = noErr;

  if (_macBookPro &&
      propertyAddress.mScope == kAudioDevicePropertyScopeOutput) {
    RTC_LOG(LS_VERBOSE) << "Data source changed";

    _macBookProPanRight = false;
    UInt32 dataSource = 0;
    UInt32 size = sizeof(UInt32);
    WEBRTC_CA_RETURN_ON_ERR(AudioObjectGetPropertyData(
        objectId, &propertyAddress, 0, NULL, &size, &dataSource));
    if (dataSource == 'ispk') {
      _macBookProPanRight = true;
      RTC_LOG(LS_VERBOSE)
          << "MacBook Pro using internal speakers; stereo panning right";
    } else {
      RTC_LOG(LS_VERBOSE) << "MacBook Pro not using internal speakers";
    }
  }

  return 0;
}

// Called when a device reports an IO-cycle overload; currently a no-op.
int32_t AudioDeviceMac::HandleProcessorOverload(
    const AudioObjectPropertyAddress propertyAddress) {
  // TODO(xians): we probably want to notify the user in some way of the
  // overload. However, the Windows interpretations of these errors seem to
  // be more severe than what ProcessorOverload is thrown for.
  //
  // We don't log the notification, as it's sent from the HAL's IO thread. We
  // don't want to slow it down even further.
  if (propertyAddress.mScope == kAudioDevicePropertyScopeInput) {
    // RTC_LOG(LS_WARNING) << "Capture processor // overload";
    //_callback->ProblemIsReported(
    // SndCardStreamObserver::ERecordingProblem);
  } else {
    // RTC_LOG(LS_WARNING) << "Render processor overload";
    //_callback->ProblemIsReported(
    // SndCardStreamObserver::EPlaybackProblem);
  }

  return 0;
}

// ============================================================================
//                                 Thread Methods
// ============================================================================

// Static trampoline for the shared (render+capture) device IOProc.
OSStatus AudioDeviceMac::deviceIOProc(AudioDeviceID,
                                      const AudioTimeStamp*,
                                      const AudioBufferList* inputData,
                                      const AudioTimeStamp* inputTime,
                                      AudioBufferList* outputData,
                                      const AudioTimeStamp* outputTime,
                                      void* clientData) {
  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
  RTC_DCHECK(ptrThis != NULL);

  ptrThis->implDeviceIOProc(inputData, inputTime, outputData, outputTime);

  // AudioDeviceIOProc functions are supposed to return 0
  return 0;
}

// Static trampoline for the render AudioConverter input callback.
OSStatus AudioDeviceMac::outConverterProc(AudioConverterRef,
                                          UInt32* numberDataPackets,
                                          AudioBufferList* data,
                                          AudioStreamPacketDescription**,
                                          void* userData) {
  AudioDeviceMac* ptrThis =
(AudioDeviceMac*)userData;
  RTC_DCHECK(ptrThis != NULL);

  return ptrThis->implOutConverterProc(numberDataPackets, data);
}

// Static AudioDeviceIOProc trampoline for the capture-only device (used when
// input and output are separate devices); forwards to the member
// implementation.
OSStatus AudioDeviceMac::inDeviceIOProc(AudioDeviceID,
                                        const AudioTimeStamp*,
                                        const AudioBufferList* inputData,
                                        const AudioTimeStamp* inputTime,
                                        AudioBufferList*,
                                        const AudioTimeStamp*,
                                        void* clientData) {
  AudioDeviceMac* ptrThis = (AudioDeviceMac*)clientData;
  RTC_DCHECK(ptrThis != NULL);

  ptrThis->implInDeviceIOProc(inputData, inputTime);

  // AudioDeviceIOProc functions are supposed to return 0
  return 0;
}

// Static trampoline for the capture-side AudioConverter input callback.
OSStatus AudioDeviceMac::inConverterProc(
    AudioConverterRef,
    UInt32* numberDataPackets,
    AudioBufferList* data,
    AudioStreamPacketDescription** /*dataPacketDescription*/,
    void* userData) {
  AudioDeviceMac* ptrThis = static_cast<AudioDeviceMac*>(userData);
  RTC_DCHECK(ptrThis != NULL);

  return ptrThis->implInConverterProc(numberDataPackets, data);
}

// Render-side (or shared-device) IOProc body, run on the HAL's IO thread.
// Fills `outputData` from the render ring buffer via the converter, updates
// the render delay estimate, and handles deferred device shutdown.
OSStatus AudioDeviceMac::implDeviceIOProc(const AudioBufferList* inputData,
                                          const AudioTimeStamp* inputTime,
                                          AudioBufferList* outputData,
                                          const AudioTimeStamp* outputTime) {
  OSStatus err = noErr;
  UInt64 outputTimeNs = AudioConvertHostTimeToNanos(outputTime->mHostTime);
  UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

  // With a shared input/output device, this single IOProc also drives capture.
  if (!_twoDevices && _recording) {
    implInDeviceIOProc(inputData, inputTime);
  }

  // Check if we should close down audio device
  // Double-checked locking optimization to remove locking overhead
  if (_doStop) {
    MutexLock lock(&mutex_);
    if (_doStop) {
      if (_twoDevices || (!_recording && !_playing)) {
        // In the case of a shared device, the single driving ioProc
        // is stopped here
        WEBRTC_CA_LOG_ERR(AudioDeviceStop(_outputDeviceID, _deviceIOProcID));
        WEBRTC_CA_LOG_WARN(
AudioDeviceDestroyIOProcID(_outputDeviceID, _deviceIOProcID));
        if (err == noErr) {
          RTC_LOG(LS_VERBOSE) << "Playout or shared device stopped";
        }
      }

      _doStop = false;
      // Unblock the thread waiting for the stop to complete.
      _stopEvent.Set();
      return 0;
    }
  }

  if (!_playing) {
    // This can be the case when a shared device is capturing but not
    // rendering. We allow the checks above before returning to avoid a
    // timeout when capturing is stopped.
    return 0;
  }

  RTC_DCHECK(_outStreamFormat.mBytesPerFrame != 0);
  // Number of device frames requested for this IO cycle.
  UInt32 size =
      outputData->mBuffers->mDataByteSize / _outStreamFormat.mBytesPerFrame;

  // Pull converted samples into outputData; outConverterProc supplies the
  // source data from the render ring buffer.
  // TODO(xians): signal an error somehow?
  err = AudioConverterFillComplexBuffer(_renderConverter, outConverterProc,
                                        this, &size, outputData, NULL);
  if (err != noErr) {
    if (err == 1) {
      // This is our own error.
      RTC_LOG(LS_ERROR) << "Error in AudioConverterFillComplexBuffer()";
      return 1;
    } else {
      logCAMsg(LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
               (const char*)&err);
      return 1;
    }
  }

  ring_buffer_size_t bufSizeSamples =
      PaUtil_GetRingBufferReadAvailable(_paRenderBuffer);

  // Delay estimate: time until this buffer reaches the output (host-time
  // delta, 1e-3 converts ns to us) plus what is still queued in the ring
  // buffer, converted to us via the desired sample rate.
  int32_t renderDelayUs =
      static_cast<int32_t>(1e-3 * (outputTimeNs - nowNs) + 0.5);
  renderDelayUs += static_cast<int32_t>(
      (1.0e6 * bufSizeSamples) / _outDesiredFormat.mChannelsPerFrame /
          _outDesiredFormat.mSampleRate +
      0.5);

  _renderDelayUs = renderDelayUs;

  return 0;
}

// AudioConverter input callback for playout: hands the converter a block read
// from the render ring buffer (zero-padded on underrun) and signals the
// render worker thread that ring-buffer space is available again.
OSStatus AudioDeviceMac::implOutConverterProc(UInt32* numberDataPackets,
                                              AudioBufferList* data) {
  RTC_DCHECK(data->mNumberBuffers == 1);
  ring_buffer_size_t numSamples =
      *numberDataPackets * _outDesiredFormat.mChannelsPerFrame;

  data->mBuffers->mNumberChannels = _outDesiredFormat.mChannelsPerFrame;
  // Always give the converter as much as it wants, zero padding as required.
data->mBuffers->mDataByteSize =
      *numberDataPackets * _outDesiredFormat.mBytesPerPacket;
  data->mBuffers->mData = _renderConvertData;
  // Zero first so that an underrun (fewer samples queued than requested)
  // plays out silence rather than stale data.
  memset(_renderConvertData, 0, sizeof(_renderConvertData));

  PaUtil_ReadRingBuffer(_paRenderBuffer, _renderConvertData, numSamples);

  // Wake the render worker so it can refill the ring buffer.
  kern_return_t kernErr = semaphore_signal_all(_renderSemaphore);
  if (kernErr != KERN_SUCCESS) {
    RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr;
    return 1;
  }

  return 0;
}

// Capture-side IOProc body, run on the HAL's IO thread. Writes the captured
// samples into the capture ring buffer, updates the capture delay estimate,
// and handles deferred device shutdown.
OSStatus AudioDeviceMac::implInDeviceIOProc(const AudioBufferList* inputData,
                                            const AudioTimeStamp* inputTime) {
  OSStatus err = noErr;
  UInt64 inputTimeNs = AudioConvertHostTimeToNanos(inputTime->mHostTime);
  UInt64 nowNs = AudioConvertHostTimeToNanos(AudioGetCurrentHostTime());

  // Check if we should close down audio device
  // Double-checked locking optimization to remove locking overhead
  if (_doStopRec) {
    MutexLock lock(&mutex_);
    if (_doStopRec) {
      // This will be signalled only when a shared device is not in use.
      WEBRTC_CA_LOG_ERR(AudioDeviceStop(_inputDeviceID, _inDeviceIOProcID));
      WEBRTC_CA_LOG_WARN(
          AudioDeviceDestroyIOProcID(_inputDeviceID, _inDeviceIOProcID));
      if (err == noErr) {
        RTC_LOG(LS_VERBOSE) << "Recording device stopped";
      }

      _doStopRec = false;
      // Unblock the thread waiting for capture to stop.
      _stopEventRec.Set();
      return 0;
    }
  }

  if (!_recording) {
    // Allow above checks to avoid a timeout on stopping capture.
2288 return 0; 2289 } 2290 2291 ring_buffer_size_t bufSizeSamples = 2292 PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer); 2293 2294 int32_t captureDelayUs = 2295 static_cast<int32_t>(1e-3 * (nowNs - inputTimeNs) + 0.5); 2296 captureDelayUs += static_cast<int32_t>((1.0e6 * bufSizeSamples) / 2297 _inStreamFormat.mChannelsPerFrame / 2298 _inStreamFormat.mSampleRate + 2299 0.5); 2300 2301 _captureDelayUs = captureDelayUs; 2302 2303 RTC_DCHECK(inputData->mNumberBuffers == 1); 2304 ring_buffer_size_t numSamples = inputData->mBuffers->mDataByteSize * 2305 _inStreamFormat.mChannelsPerFrame / 2306 _inStreamFormat.mBytesPerPacket; 2307 PaUtil_WriteRingBuffer(_paCaptureBuffer, inputData->mBuffers->mData, 2308 numSamples); 2309 2310 kern_return_t kernErr = semaphore_signal_all(_captureSemaphore); 2311 if (kernErr != KERN_SUCCESS) { 2312 RTC_LOG(LS_ERROR) << "semaphore_signal_all() error: " << kernErr; 2313 } 2314 2315 return err; 2316 } 2317 2318 OSStatus AudioDeviceMac::implInConverterProc(UInt32* numberDataPackets, 2319 AudioBufferList* data) { 2320 RTC_DCHECK(data->mNumberBuffers == 1); 2321 ring_buffer_size_t numSamples = 2322 *numberDataPackets * _inStreamFormat.mChannelsPerFrame; 2323 2324 while (PaUtil_GetRingBufferReadAvailable(_paCaptureBuffer) < numSamples) { 2325 mach_timespec_t timeout; 2326 timeout.tv_sec = 0; 2327 timeout.tv_nsec = TIMER_PERIOD_MS; 2328 2329 kern_return_t kernErr = semaphore_timedwait(_captureSemaphore, timeout); 2330 if (kernErr == KERN_OPERATION_TIMED_OUT) { 2331 int32_t signal = _captureDeviceIsAlive; 2332 if (signal == 0) { 2333 // The capture device is no longer alive; stop the worker thread. 2334 *numberDataPackets = 0; 2335 return 1; 2336 } 2337 } else if (kernErr != KERN_SUCCESS) { 2338 RTC_LOG(LS_ERROR) << "semaphore_wait() error: " << kernErr; 2339 } 2340 } 2341 2342 // Pass the read pointer directly to the converter to avoid a memcpy. 
2343 void* dummyPtr; 2344 ring_buffer_size_t dummySize; 2345 PaUtil_GetRingBufferReadRegions(_paCaptureBuffer, numSamples, 2346 &data->mBuffers->mData, &numSamples, 2347 &dummyPtr, &dummySize); 2348 PaUtil_AdvanceRingBufferReadIndex(_paCaptureBuffer, numSamples); 2349 2350 data->mBuffers->mNumberChannels = _inStreamFormat.mChannelsPerFrame; 2351 *numberDataPackets = numSamples / _inStreamFormat.mChannelsPerFrame; 2352 data->mBuffers->mDataByteSize = 2353 *numberDataPackets * _inStreamFormat.mBytesPerPacket; 2354 2355 return 0; 2356 } 2357 2358 bool AudioDeviceMac::RenderWorkerThread() { 2359 ring_buffer_size_t numSamples = 2360 ENGINE_PLAY_BUF_SIZE_IN_SAMPLES * _outDesiredFormat.mChannelsPerFrame; 2361 while (PaUtil_GetRingBufferWriteAvailable(_paRenderBuffer) - 2362 _renderDelayOffsetSamples < 2363 numSamples) { 2364 mach_timespec_t timeout; 2365 timeout.tv_sec = 0; 2366 timeout.tv_nsec = TIMER_PERIOD_MS; 2367 2368 kern_return_t kernErr = semaphore_timedwait(_renderSemaphore, timeout); 2369 if (kernErr == KERN_OPERATION_TIMED_OUT) { 2370 int32_t signal = _renderDeviceIsAlive; 2371 if (signal == 0) { 2372 // The render device is no longer alive; stop the worker thread. 2373 return false; 2374 } 2375 } else if (kernErr != KERN_SUCCESS) { 2376 RTC_LOG(LS_ERROR) << "semaphore_timedwait() error: " << kernErr; 2377 } 2378 } 2379 2380 int8_t playBuffer[4 * ENGINE_PLAY_BUF_SIZE_IN_SAMPLES]; 2381 2382 if (!_ptrAudioBuffer) { 2383 RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid"; 2384 return false; 2385 } 2386 2387 // Ask for new PCM data to be played out using the AudioDeviceBuffer. 
uint32_t nSamples =
      _ptrAudioBuffer->RequestPlayoutData(ENGINE_PLAY_BUF_SIZE_IN_SAMPLES);

  nSamples = _ptrAudioBuffer->GetPlayoutData(playBuffer);
  if (nSamples != ENGINE_PLAY_BUF_SIZE_IN_SAMPLES) {
    RTC_LOG(LS_ERROR) << "invalid number of output samples(" << nSamples << ")";
  }

  uint32_t nOutSamples = nSamples * _outDesiredFormat.mChannelsPerFrame;

  SInt16* pPlayBuffer = (SInt16*)&playBuffer;
  if (_macBookProPanRight && (_playChannels == 2)) {
    // Mix entirely into the right channel and zero the left channel.
    SInt32 sampleInt32 = 0;
    for (uint32_t sampleIdx = 0; sampleIdx < nOutSamples; sampleIdx += 2) {
      // Average the L/R pair in 32 bits, then saturate to the 16-bit range.
      sampleInt32 = pPlayBuffer[sampleIdx];
      sampleInt32 += pPlayBuffer[sampleIdx + 1];
      sampleInt32 /= 2;

      if (sampleInt32 > 32767) {
        sampleInt32 = 32767;
      } else if (sampleInt32 < -32768) {
        sampleInt32 = -32768;
      }

      pPlayBuffer[sampleIdx] = 0;
      pPlayBuffer[sampleIdx + 1] = static_cast<SInt16>(sampleInt32);
    }
  }

  PaUtil_WriteRingBuffer(_paRenderBuffer, pPlayBuffer, nOutSamples);

  return true;
}

// Capture worker thread loop body. Drains the capture ring buffer through the
// format converter (blocking inside inConverterProc until enough data is
// queued) and delivers the 16-bit PCM plus delay/typing metadata to the
// AudioDeviceBuffer. Returns false to stop the worker thread.
bool AudioDeviceMac::CaptureWorkerThread() {
  OSStatus err = noErr;
  UInt32 noRecSamples =
      ENGINE_REC_BUF_SIZE_IN_SAMPLES * _inDesiredFormat.mChannelsPerFrame;
  std::vector<SInt16> recordBuffer(noRecSamples);
  UInt32 size = ENGINE_REC_BUF_SIZE_IN_SAMPLES;

  // Destination buffer description for the converter output.
  AudioBufferList engineBuffer;
  engineBuffer.mNumberBuffers = 1;  // Interleaved channels.
  engineBuffer.mBuffers->mNumberChannels = _inDesiredFormat.mChannelsPerFrame;
  engineBuffer.mBuffers->mDataByteSize =
      _inDesiredFormat.mBytesPerPacket * noRecSamples;
  engineBuffer.mBuffers->mData = recordBuffer.data();

  err = AudioConverterFillComplexBuffer(_captureConverter, inConverterProc,
                                        this, &size, &engineBuffer, NULL);
  if (err != noErr) {
    if (err == 1) {
      // This is our own error.
return false;
    } else {
      logCAMsg(LS_ERROR, "Error in AudioConverterFillComplexBuffer()",
               (const char*)&err);
      return false;
    }
  }

  // TODO(xians): what if the returned size is incorrect?
  if (size == ENGINE_REC_BUF_SIZE_IN_SAMPLES) {
    int32_t msecOnPlaySide;
    int32_t msecOnRecordSide;

    // Snapshot the delay values updated by the IO procs.
    int32_t captureDelayUs = _captureDelayUs;
    int32_t renderDelayUs = _renderDelayUs;

    // Total one-way delays in ms: queueing delay plus device latency
    // (1e-3 converts us to ms, rounded).
    msecOnPlaySide =
        static_cast<int32_t>(1e-3 * (renderDelayUs + _renderLatencyUs) + 0.5);
    msecOnRecordSide =
        static_cast<int32_t>(1e-3 * (captureDelayUs + _captureLatencyUs) + 0.5);

    if (!_ptrAudioBuffer) {
      RTC_LOG(LS_ERROR) << "capture AudioBuffer is invalid";
      return false;
    }

    // store the recorded buffer (no action will be taken if the
    // #recorded samples is not a full buffer)
    _ptrAudioBuffer->SetRecordedBuffer((int8_t*)recordBuffer.data(),
                                       (uint32_t)size);
    _ptrAudioBuffer->SetVQEData(msecOnPlaySide, msecOnRecordSide);
    _ptrAudioBuffer->SetTypingStatus(KeyPressed());

    // deliver recorded samples at specified sample rate, mic level etc.
    // to the observer using callback
    _ptrAudioBuffer->DeliverRecordedData();
  }

  return true;
}

// Polls the state of every Mac virtual key via CGEventSourceKeyState and
// reports whether any key transitioned from up to down since the previous
// call. Used to flag typing activity on the captured audio.
bool AudioDeviceMac::KeyPressed() {
  bool key_down = false;
  // Loop through all Mac virtual key constant values.
  for (unsigned int key_index = 0; key_index < std::size(prev_key_state_);
       ++key_index) {
    bool keyState =
        CGEventSourceKeyState(kCGEventSourceStateHIDSystemState, key_index);
    // A false -> true change in keymap means a key is pressed.
    key_down |= (keyState && !prev_key_state_[key_index]);
    // Save current state.
    prev_key_state_[key_index] = keyState;
  }
  return key_down;
}
}  // namespace webrtc