1/*
2 * Copyright (C) 2010 Google Inc. All rights reserved.
3 * Copyright (C) 2016 Apple Inc. All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
15 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
16 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
17 * DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
18 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
19 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
20 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
21 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26#include "config.h"
27
28#if ENABLE(WEB_AUDIO)
29
30#include "AudioContext.h"
31
32#include "AnalyserNode.h"
33#include "AsyncAudioDecoder.h"
34#include "AudioBuffer.h"
35#include "AudioBufferCallback.h"
36#include "AudioBufferSourceNode.h"
37#include "AudioListener.h"
38#include "AudioNodeInput.h"
39#include "AudioNodeOutput.h"
40#include "AudioSession.h"
41#include "BiquadFilterNode.h"
42#include "ChannelMergerNode.h"
43#include "ChannelSplitterNode.h"
44#include "ConvolverNode.h"
45#include "DefaultAudioDestinationNode.h"
46#include "DelayNode.h"
47#include "Document.h"
48#include "DynamicsCompressorNode.h"
49#include "EventNames.h"
50#include "FFTFrame.h"
51#include "Frame.h"
52#include "FrameLoader.h"
53#include "GainNode.h"
54#include "GenericEventQueue.h"
55#include "HRTFDatabaseLoader.h"
56#include "HRTFPanner.h"
57#include "JSDOMPromiseDeferred.h"
58#include "Logging.h"
59#include "NetworkingContext.h"
60#include "OfflineAudioCompletionEvent.h"
61#include "OfflineAudioDestinationNode.h"
62#include "OscillatorNode.h"
63#include "Page.h"
64#include "PannerNode.h"
65#include "PeriodicWave.h"
66#include "ScriptController.h"
67#include "ScriptProcessorNode.h"
68#include "WaveShaperNode.h"
69#include <JavaScriptCore/ScriptCallStack.h>
70
71#if ENABLE(MEDIA_STREAM)
72#include "MediaStream.h"
73#include "MediaStreamAudioDestinationNode.h"
74#include "MediaStreamAudioSource.h"
75#include "MediaStreamAudioSourceNode.h"
76#endif
77
78#if ENABLE(VIDEO)
79#include "HTMLMediaElement.h"
80#include "MediaElementAudioSourceNode.h"
81#endif
82
83#if DEBUG_AUDIONODE_REFERENCES
84#include <stdio.h>
85#endif
86
87#if USE(GSTREAMER)
88#include "GStreamerCommon.h"
89#endif
90
91#if PLATFORM(IOS_FAMILY)
92#include "ScriptController.h"
93#include "Settings.h"
94#endif
95
96#include <JavaScriptCore/ArrayBuffer.h>
97#include <wtf/Atomics.h>
98#include <wtf/IsoMallocInlines.h>
99#include <wtf/MainThread.h>
100#include <wtf/Ref.h>
101#include <wtf/RefCounted.h>
102#include <wtf/text/WTFString.h>
103
// Upper bound on the number of Fourier coefficients accepted by createPeriodicWave().
const unsigned MaxPeriodicWaveLength = 4096;
105
106namespace WebCore {
107
WTF_MAKE_ISO_ALLOCATED_IMPL(AudioContext);

// Release-logging helper: logs only when the owning page permits always-on logging.
#define RELEASE_LOG_IF_ALLOWED(fmt, ...) RELEASE_LOG_IF(document() && document()->page() && document()->page()->isAlwaysOnLoggingAllowed(), Media, "%p - AudioContext::" fmt, this, ##__VA_ARGS__)
111
112bool AudioContext::isSampleRateRangeGood(float sampleRate)
113{
114 // FIXME: It would be nice if the minimum sample-rate could be less than 44.1KHz,
115 // but that will require some fixes in HRTFPanner::fftSizeForSampleRate(), and some testing there.
116 return sampleRate >= 44100 && sampleRate <= 96000;
117}
118
// Don't allow more than this number of simultaneous AudioContexts talking to hardware.
const unsigned MaxHardwareContexts = 4;
// Count of live hardware-backed (non-offline) contexts; incremented in
// lazyInitialize() and decremented in uninitialize().
unsigned AudioContext::s_hardwareContextCount = 0;
122
123RefPtr<AudioContext> AudioContext::create(Document& document)
124{
125 ASSERT(isMainThread());
126 if (s_hardwareContextCount >= MaxHardwareContexts)
127 return nullptr;
128
129 RefPtr<AudioContext> audioContext(adoptRef(new AudioContext(document)));
130 audioContext->suspendIfNeeded();
131 return audioContext;
132}
133
// Constructor for rendering to the audio hardware.
AudioContext::AudioContext(Document& document)
    : ActiveDOMObject(document)
#if !RELEASE_LOG_DISABLED
    , m_logger(document.logger())
    , m_logIdentifier(uniqueLogIdentifier())
#endif
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
{
    constructCommon();

    // The destination node drives rendering; creating it here ties the
    // context to the default hardware output.
    m_destinationNode = DefaultAudioDestinationNode::create(*this);

    // Initialize the destination node's muted state to match the page's current muted state.
    pageMutedStateDidChange();

    // Balanced by removeAudioProducer()/unregisterForVisibilityStateChangedCallbacks()
    // in the destructor for non-offline contexts.
    document.addAudioProducer(*this);
    document.registerForVisibilityStateChangedCallbacks(*this);
}
154
// Constructor for offline (non-realtime) rendering.
AudioContext::AudioContext(Document& document, unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
    : ActiveDOMObject(document)
#if !RELEASE_LOG_DISABLED
    , m_logger(document.logger())
    , m_logIdentifier(uniqueLogIdentifier())
#endif
    , m_isOfflineContext(true)
    , m_mediaSession(PlatformMediaSession::create(*this))
    , m_eventQueue(std::make_unique<GenericEventQueue>(*this))
{
    constructCommon();

    // Create a new destination for offline rendering.
    // NOTE(review): AudioBuffer::create() can return null for unsupported
    // parameters; the render target is handed to the destination node as-is.
    m_renderTarget = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
    m_destinationNode = OfflineAudioDestinationNode::create(*this, m_renderTarget.get());
}
172
// Initialization shared by the realtime and offline constructors.
void AudioContext::constructCommon()
{
    // According to spec AudioContext must die only after page navigate.
    // Lets mark it as ActiveDOMObject with pending activity and unmark it in clear method.
    setPendingActivity(*this);

    FFTFrame::initialize();

    m_listener = AudioListener::create();

    ASSERT(document());
    // Honor the document's autoplay policy: either gate audio start on a user
    // gesture, or clear all restrictions.
    if (document()->audioPlaybackRequiresUserGesture())
        addBehaviorRestriction(RequireUserGestureForAudioStartRestriction);
    else
        m_restrictions = NoRestrictions;

#if PLATFORM(COCOA)
    addBehaviorRestriction(RequirePageConsentForAudioStartRestriction);
#endif
}
193
AudioContext::~AudioContext()
{
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::~AudioContext()\n", this);
#endif
    // stop() must already have run: uninitialize() and clear() leave all of
    // these collections empty before destruction.
    ASSERT(!m_isInitialized);
    ASSERT(m_isStopScheduled);
    ASSERT(m_nodesToDelete.isEmpty());
    ASSERT(m_referencedNodes.isEmpty());
    ASSERT(m_finishedNodes.isEmpty()); // FIXME (bug 105870): This assertion fails on tests sometimes.
    ASSERT(m_automaticPullNodes.isEmpty());
    // NOTE(review): this resize appears to exist only so the following
    // assertion reflects any still-pending pull-node update; it has no other
    // effect this late in the object's life.
    if (m_automaticPullNodesNeedUpdating)
        m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
    ASSERT(m_renderingAutomaticPullNodes.isEmpty());
    // FIXME: Can we assert that m_deferredFinishDerefList is empty?

    // Balance the document registration performed by the realtime constructor.
    if (!isOfflineContext() && scriptExecutionContext()) {
        document()->removeAudioProducer(*this);
        document()->unregisterForVisibilityStateChangedCallbacks(*this);
    }
}
215
// Deferred one-time initialization, performed on first node creation rather
// than in the constructor. Starts the audio thread for realtime contexts.
void AudioContext::lazyInitialize()
{
    ASSERT(!m_isStopScheduled);

    if (m_isInitialized)
        return;

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    ASSERT(!m_isAudioThreadFinished);
    if (m_isAudioThreadFinished)
        return;

    if (m_destinationNode) {
        m_destinationNode->initialize();

        if (!isOfflineContext()) {
            // This starts the audio thread. The destination node's provideInput() method will now be called repeatedly to render audio.
            // Each time provideInput() is called, a portion of the audio stream is rendered. Let's call this time period a "render quantum".
            // NOTE: for now default AudioContext does not need an explicit startRendering() call from JavaScript.
            // We may want to consider requiring it for symmetry with OfflineAudioContext.
            startRendering();
            ++s_hardwareContextCount;
        }
    }
    m_isInitialized = true;
}
242
243void AudioContext::clear()
244{
245 // We have to release our reference to the destination node before the context will ever be deleted since the destination node holds a reference to the context.
246 if (m_destinationNode)
247 m_destinationNode = nullptr;
248
249 // Audio thread is dead. Nobody will schedule node deletion action. Let's do it ourselves.
250 do {
251 deleteMarkedNodes();
252 m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
253 m_nodesMarkedForDeletion.clear();
254 } while (m_nodesToDelete.size());
255
256 // It was set in constructCommon.
257 unsetPendingActivity(*this);
258}
259
// Stops the audio thread and all rendering; the inverse of lazyInitialize().
// Safe to call when never initialized.
void AudioContext::uninitialize()
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (!m_isInitialized)
        return;

    // This stops the audio thread and all audio rendering.
    if (m_destinationNode)
        m_destinationNode->uninitialize();

    // Don't allow the context to initialize a second time after it's already been explicitly uninitialized.
    m_isAudioThreadFinished = true;

    if (!isOfflineContext()) {
        ASSERT(s_hardwareContextCount);
        --s_hardwareContextCount;

        // Offline contexts move to 'Closed' state when dispatching the completion event.
        setState(State::Closed);
    }

    // Get rid of the sources which may still be playing.
    derefUnfinishedSourceNodes();

    m_isInitialized = false;
}
289
// True once lazyInitialize() has completed and until uninitialize() runs.
bool AudioContext::isInitialized() const
{
    return m_isInitialized;
}
294
295void AudioContext::addReaction(State state, DOMPromiseDeferred<void>&& promise)
296{
297 size_t stateIndex = static_cast<size_t>(state);
298 if (stateIndex >= m_stateReactions.size())
299 m_stateReactions.grow(stateIndex + 1);
300
301 m_stateReactions[stateIndex].append(WTFMove(promise));
302}
303
304void AudioContext::setState(State state)
305{
306 if (m_state == state)
307 return;
308
309 m_state = state;
310 m_eventQueue->enqueueEvent(Event::create(eventNames().statechangeEvent, Event::CanBubble::Yes, Event::IsCancelable::No));
311
312 size_t stateIndex = static_cast<size_t>(state);
313 if (stateIndex >= m_stateReactions.size())
314 return;
315
316 Vector<DOMPromiseDeferred<void>> reactions;
317 m_stateReactions[stateIndex].swap(reactions);
318
319 for (auto& promise : reactions)
320 promise.resolve();
321}
322
// ActiveDOMObject teardown entry point: shuts down rendering and releases
// everything. Idempotent, because ScriptExecutionContext may call it twice.
void AudioContext::stop()
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    // Usually ScriptExecutionContext calls stop twice.
    if (m_isStopScheduled)
        return;
    m_isStopScheduled = true;

    ASSERT(document());
    document()->updateIsPlayingMedia();

    m_eventQueue->close();

    uninitialize();
    clear();
}
342
// A context may be page-cached only when it is not actively rendering.
bool AudioContext::canSuspendForDocumentSuspension() const
{
    // FIXME: We should be able to suspend while rendering as well with some more code.
    return m_state == State::Suspended || m_state == State::Closed;
}
348
// Debug name used by ActiveDOMObject diagnostics.
const char* AudioContext::activeDOMObjectName() const
{
    return "AudioContext";
}
353
// The owning document, or null after the script execution context is gone.
Document* AudioContext::document() const
{
    return downcast<Document>(m_scriptExecutionContext);
}
358
// PlatformMediaSessionClient override; currently identical to document().
Document* AudioContext::hostingDocument() const
{
    return downcast<Document>(m_scriptExecutionContext);
}
363
364String AudioContext::sourceApplicationIdentifier() const
365{
366 Document* document = this->document();
367 if (Frame* frame = document ? document->frame() : nullptr) {
368 if (NetworkingContext* networkingContext = frame->loader().networkingContext())
369 return networkingContext->sourceApplicationIdentifier();
370 }
371 return emptyString();
372}
373
374bool AudioContext::processingUserGestureForMedia() const
375{
376 return document() ? document()->processingUserGestureForMedia() : false;
377}
378
379bool AudioContext::isSuspended() const
380{
381 return !document() || document()->activeDOMObjectsAreSuspended() || document()->activeDOMObjectsAreStopped();
382}
383
// Begins a media-session interruption when the page goes to the background,
// and ends it when the page returns to the foreground.
void AudioContext::visibilityStateChanged()
{
    // Do not suspend if audio is audible.
    if (!document() || mediaState() == MediaProducer::IsPlayingAudio || m_isStopScheduled)
        return;

    if (document()->hidden()) {
        if (state() == State::Running) {
            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Suspending playback after going to the background");
            m_mediaSession->beginInterruption(PlatformMediaSession::EnteringBackground);
        }
    } else {
        // Only resume if it was this backgrounding that interrupted us.
        if (state() == State::Interrupted) {
            RELEASE_LOG_IF_ALLOWED("visibilityStateChanged() Resuming playback after entering foreground");
            m_mediaSession->endInterruption(PlatformMediaSession::MayResumePlaying);
        }
    }
}
402
403bool AudioContext::wouldTaintOrigin(const URL& url) const
404{
405 if (url.protocolIsData())
406 return false;
407
408 if (auto* document = this->document())
409 return !document->securityOrigin().canRequest(url);
410
411 return false;
412}
413
414ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(unsigned numberOfChannels, size_t numberOfFrames, float sampleRate)
415{
416 auto audioBuffer = AudioBuffer::create(numberOfChannels, numberOfFrames, sampleRate);
417 if (!audioBuffer)
418 return Exception { NotSupportedError };
419 return audioBuffer.releaseNonNull();
420}
421
422ExceptionOr<Ref<AudioBuffer>> AudioContext::createBuffer(ArrayBuffer& arrayBuffer, bool mixToMono)
423{
424 auto audioBuffer = AudioBuffer::createFromAudioFileData(arrayBuffer.data(), arrayBuffer.byteLength(), mixToMono, sampleRate());
425 if (!audioBuffer)
426 return Exception { SyntaxError };
427 return audioBuffer.releaseNonNull();
428}
429
// Asynchronously decodes audio file data; one of the two callbacks is invoked
// by the decoder on completion.
void AudioContext::decodeAudioData(Ref<ArrayBuffer>&& audioData, RefPtr<AudioBufferCallback>&& successCallback, RefPtr<AudioBufferCallback>&& errorCallback)
{
    m_audioDecoder.decodeAsync(WTFMove(audioData), sampleRate(), WTFMove(successCallback), WTFMove(errorCallback));
}
434
// Creates an AudioBufferSourceNode. Fails with InvalidStateError once the
// context has been stopped.
ExceptionOr<Ref<AudioBufferSourceNode>> AudioContext::createBufferSource()
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (m_isStopScheduled)
        return Exception { InvalidStateError };

    lazyInitialize();
    Ref<AudioBufferSourceNode> node = AudioBufferSourceNode::create(*this, sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node);

    return node;
}
453
454#if ENABLE(VIDEO)
455
// Wraps an HTMLMediaElement as an audio source. An element may feed at most
// one source node, so a second attempt is an InvalidStateError.
ExceptionOr<Ref<MediaElementAudioSourceNode>> AudioContext::createMediaElementSource(HTMLMediaElement& mediaElement)
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (m_isStopScheduled || mediaElement.audioSourceNode())
        return Exception { InvalidStateError };

    lazyInitialize();

    auto node = MediaElementAudioSourceNode::create(*this, mediaElement);

    // Back-link so the element can reject further createMediaElementSource() calls.
    mediaElement.setAudioSourceNode(node.ptr());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
474
475#endif
476
477#if ENABLE(MEDIA_STREAM)
478
479ExceptionOr<Ref<MediaStreamAudioSourceNode>> AudioContext::createMediaStreamSource(MediaStream& mediaStream)
480{
481 ALWAYS_LOG(LOGIDENTIFIER);
482
483 ASSERT(isMainThread());
484
485 if (m_isStopScheduled)
486 return Exception { InvalidStateError };
487
488 auto audioTracks = mediaStream.getAudioTracks();
489 if (audioTracks.isEmpty())
490 return Exception { InvalidStateError };
491
492 MediaStreamTrack* providerTrack = nullptr;
493 for (auto& track : audioTracks) {
494 if (track->audioSourceProvider()) {
495 providerTrack = track.get();
496 break;
497 }
498 }
499 if (!providerTrack)
500 return Exception { InvalidStateError };
501
502 lazyInitialize();
503
504 auto node = MediaStreamAudioSourceNode::create(*this, mediaStream, *providerTrack);
505 node->setFormat(2, sampleRate());
506
507 refNode(node); // context keeps reference until node is disconnected
508 return node;
509}
510
// Creates a destination node that feeds a MediaStream (currently mono).
ExceptionOr<Ref<MediaStreamAudioDestinationNode>> AudioContext::createMediaStreamDestination()
{
    if (m_isStopScheduled)
        return Exception { InvalidStateError };

    // FIXME: Add support for an optional argument which specifies the number of channels.
    // FIXME: The default should probably be stereo instead of mono.
    return MediaStreamAudioDestinationNode::create(*this, 1);
}
520
521#endif
522
// Creates a ScriptProcessorNode, validating bufferSize and channel counts per
// the Web Audio spec (quoted inline below). bufferSize 0 means "pick for me".
ExceptionOr<Ref<ScriptProcessorNode>> AudioContext::createScriptProcessor(size_t bufferSize, size_t numberOfInputChannels, size_t numberOfOutputChannels)
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());

    if (m_isStopScheduled)
        return Exception { InvalidStateError };

    lazyInitialize();

    // W3C Editor's Draft 06 June 2017
    // https://webaudio.github.io/web-audio-api/#widl-BaseAudioContext-createScriptProcessor-ScriptProcessorNode-unsigned-long-bufferSize-unsigned-long-numberOfInputChannels-unsigned-long-numberOfOutputChannels

    // The bufferSize parameter determines the buffer size in units of sample-frames. If it's not passed in,
    // or if the value is 0, then the implementation will choose the best buffer size for the given environment,
    // which will be constant power of 2 throughout the lifetime of the node. ... If the value of this parameter
    // is not one of the allowed power-of-2 values listed above, an IndexSizeError must be thrown.
    switch (bufferSize) {
    case 0:
#if USE(AUDIO_SESSION)
        // Pick a value between 256 (2^8) and 16384 (2^14), based on the buffer size of the current AudioSession:
        bufferSize = 1 << std::max<size_t>(8, std::min<size_t>(14, std::log2(AudioSession::sharedSession().bufferSize())));
#else
        bufferSize = 2048;
#endif
        break;
    case 256:
    case 512:
    case 1024:
    case 2048:
    case 4096:
    case 8192:
    case 16384:
        break;
    default:
        return Exception { IndexSizeError };
    }

    // An IndexSizeError exception must be thrown if bufferSize or numberOfInputChannels or numberOfOutputChannels
    // are outside the valid range. It is invalid for both numberOfInputChannels and numberOfOutputChannels to be zero.
    // In this case an IndexSizeError must be thrown.

    if (!numberOfInputChannels && !numberOfOutputChannels)
        return Exception { NotSupportedError };

    // This parameter [numberOfInputChannels] determines the number of channels for this node's input. Values of
    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.

    if (numberOfInputChannels > maxNumberOfChannels())
        return Exception { NotSupportedError };

    // This parameter [numberOfOutputChannels] determines the number of channels for this node's output. Values of
    // up to 32 must be supported. A NotSupportedError must be thrown if the number of channels is not supported.

    if (numberOfOutputChannels > maxNumberOfChannels())
        return Exception { NotSupportedError };

    auto node = ScriptProcessorNode::create(*this, sampleRate(), bufferSize, numberOfInputChannels, numberOfOutputChannels);

    refNode(node); // context keeps reference until we stop making javascript rendering callbacks
    return node;
}
586
587ExceptionOr<Ref<BiquadFilterNode>> AudioContext::createBiquadFilter()
588{
589 ALWAYS_LOG(LOGIDENTIFIER);
590
591 ASSERT(isMainThread());
592 if (m_isStopScheduled)
593 return Exception { InvalidStateError };
594
595 lazyInitialize();
596
597 return BiquadFilterNode::create(*this, sampleRate());
598}
599
600ExceptionOr<Ref<WaveShaperNode>> AudioContext::createWaveShaper()
601{
602 ALWAYS_LOG(LOGIDENTIFIER);
603
604 ASSERT(isMainThread());
605 if (m_isStopScheduled)
606 return Exception { InvalidStateError };
607
608 lazyInitialize();
609 return WaveShaperNode::create(*this);
610}
611
612ExceptionOr<Ref<PannerNode>> AudioContext::createPanner()
613{
614 ALWAYS_LOG(LOGIDENTIFIER);
615
616 ASSERT(isMainThread());
617 if (m_isStopScheduled)
618 return Exception { InvalidStateError };
619
620 lazyInitialize();
621 return PannerNode::create(*this, sampleRate());
622}
623
624ExceptionOr<Ref<ConvolverNode>> AudioContext::createConvolver()
625{
626 ALWAYS_LOG(LOGIDENTIFIER);
627
628 ASSERT(isMainThread());
629 if (m_isStopScheduled)
630 return Exception { InvalidStateError };
631
632 lazyInitialize();
633 return ConvolverNode::create(*this, sampleRate());
634}
635
636ExceptionOr<Ref<DynamicsCompressorNode>> AudioContext::createDynamicsCompressor()
637{
638 ALWAYS_LOG(LOGIDENTIFIER);
639
640 ASSERT(isMainThread());
641 if (m_isStopScheduled)
642 return Exception { InvalidStateError };
643
644 lazyInitialize();
645 return DynamicsCompressorNode::create(*this, sampleRate());
646}
647
648ExceptionOr<Ref<AnalyserNode>> AudioContext::createAnalyser()
649{
650 ALWAYS_LOG(LOGIDENTIFIER);
651
652 ASSERT(isMainThread());
653 if (m_isStopScheduled)
654 return Exception { InvalidStateError };
655
656 lazyInitialize();
657 return AnalyserNode::create(*this, sampleRate());
658}
659
660ExceptionOr<Ref<GainNode>> AudioContext::createGain()
661{
662 ALWAYS_LOG(LOGIDENTIFIER);
663
664 ASSERT(isMainThread());
665 if (m_isStopScheduled)
666 return Exception { InvalidStateError };
667
668 lazyInitialize();
669 return GainNode::create(*this, sampleRate());
670}
671
672ExceptionOr<Ref<DelayNode>> AudioContext::createDelay(double maxDelayTime)
673{
674 ALWAYS_LOG(LOGIDENTIFIER);
675
676 ASSERT(isMainThread());
677 if (m_isStopScheduled)
678 return Exception { InvalidStateError };
679
680 lazyInitialize();
681 return DelayNode::create(*this, sampleRate(), maxDelayTime);
682}
683
684ExceptionOr<Ref<ChannelSplitterNode>> AudioContext::createChannelSplitter(size_t numberOfOutputs)
685{
686 ALWAYS_LOG(LOGIDENTIFIER);
687
688 ASSERT(isMainThread());
689 if (m_isStopScheduled)
690 return Exception { InvalidStateError };
691
692 lazyInitialize();
693 auto node = ChannelSplitterNode::create(*this, sampleRate(), numberOfOutputs);
694 if (!node)
695 return Exception { IndexSizeError };
696 return node.releaseNonNull();
697}
698
699ExceptionOr<Ref<ChannelMergerNode>> AudioContext::createChannelMerger(size_t numberOfInputs)
700{
701 ALWAYS_LOG(LOGIDENTIFIER);
702
703 ASSERT(isMainThread());
704 if (m_isStopScheduled)
705 return Exception { InvalidStateError };
706
707 lazyInitialize();
708 auto node = ChannelMergerNode::create(*this, sampleRate(), numberOfInputs);
709 if (!node)
710 return Exception { IndexSizeError };
711 return node.releaseNonNull();
712}
713
// Creates an OscillatorNode. As a scheduled source, the context keeps it
// referenced until playback finishes.
ExceptionOr<Ref<OscillatorNode>> AudioContext::createOscillator()
{
    ALWAYS_LOG(LOGIDENTIFIER);

    ASSERT(isMainThread());
    if (m_isStopScheduled)
        return Exception { InvalidStateError };

    lazyInitialize();

    Ref<OscillatorNode> node = OscillatorNode::create(*this, sampleRate());

    // Because this is an AudioScheduledSourceNode, the context keeps a reference until it has finished playing.
    // When this happens, AudioScheduledSourceNode::finish() calls AudioContext::notifyNodeFinishedProcessing().
    refNode(node);

    return node;
}
732
733ExceptionOr<Ref<PeriodicWave>> AudioContext::createPeriodicWave(Float32Array& real, Float32Array& imaginary)
734{
735 ALWAYS_LOG(LOGIDENTIFIER);
736
737 ASSERT(isMainThread());
738 if (m_isStopScheduled)
739 return Exception { InvalidStateError };
740
741 if (real.length() != imaginary.length() || (real.length() > MaxPeriodicWaveLength) || !real.length())
742 return Exception { IndexSizeError };
743 lazyInitialize();
744 return PeriodicWave::create(sampleRate(), real, imaginary);
745}
746
// Called from the audio thread when a scheduled source finishes; the node is
// deref'd later by derefFinishedSourceNodes() under the graph lock.
void AudioContext::notifyNodeFinishedProcessing(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_finishedNodes.append(node);
}
752
// Drops the context's connection references to sources that reported
// completion via notifyNodeFinishedProcessing().
void AudioContext::derefFinishedSourceNodes()
{
    ASSERT(isGraphOwner());
    ASSERT(isAudioThread() || isAudioThreadFinished());
    for (auto& node : m_finishedNodes)
        derefNode(*node);

    m_finishedNodes.clear();
}
762
// Takes a connection reference on a node so the context keeps it alive
// (e.g. while a scheduled source is playing). Balanced by derefNode().
void AudioContext::refNode(AudioNode& node)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);

    node.ref(AudioNode::RefTypeConnection);
    m_referencedNodes.append(&node);
}
771
// Releases a connection reference taken by refNode(); caller must hold the graph lock.
void AudioContext::derefNode(AudioNode& node)
{
    ASSERT(isGraphOwner());

    node.deref(AudioNode::RefTypeConnection);

    ASSERT(m_referencedNodes.contains(&node));
    m_referencedNodes.removeFirst(&node);
}
781
// Teardown helper: drops connection references on all still-referenced nodes
// once the audio thread is gone (called from uninitialize()).
void AudioContext::derefUnfinishedSourceNodes()
{
    ASSERT(isMainThread() && isAudioThreadFinished());
    for (auto& node : m_referencedNodes)
        node->deref(AudioNode::RefTypeConnection);

    m_referencedNodes.clear();
}
790
791void AudioContext::lock(bool& mustReleaseLock)
792{
793 // Don't allow regular lock in real-time audio thread.
794 ASSERT(isMainThread());
795
796 Thread& thisThread = Thread::current();
797
798 if (&thisThread == m_graphOwnerThread) {
799 // We already have the lock.
800 mustReleaseLock = false;
801 } else {
802 // Acquire the lock.
803 m_contextGraphMutex.lock();
804 m_graphOwnerThread = &thisThread;
805 mustReleaseLock = true;
806 }
807}
808
// Non-blocking lock attempt for the audio thread. Returns whether the lock is
// held on return; mustReleaseLock reports whether this call acquired it.
// On a non-audio thread it degrades to a blocking lock() (see comment below).
bool AudioContext::tryLock(bool& mustReleaseLock)
{
    Thread& thisThread = Thread::current();
    bool isAudioThread = &thisThread == audioThread();

    // Try to catch cases of using try lock on main thread - it should use regular lock.
    ASSERT(isAudioThread || isAudioThreadFinished());

    if (!isAudioThread) {
        // In release build treat tryLock() as lock() (since above ASSERT(isAudioThread) never fires) - this is the best we can do.
        lock(mustReleaseLock);
        return true;
    }

    bool hasLock;

    if (&thisThread == m_graphOwnerThread) {
        // Thread already has the lock.
        hasLock = true;
        mustReleaseLock = false;
    } else {
        // Don't already have the lock - try to acquire it.
        hasLock = m_contextGraphMutex.tryLock();

        if (hasLock)
            m_graphOwnerThread = &thisThread;

        mustReleaseLock = hasLock;
    }

    return hasLock;
}
841
// Releases the graph mutex; only the owning thread may call this.
void AudioContext::unlock()
{
    ASSERT(m_graphOwnerThread == &Thread::current());

    // Clear ownership before unlocking so a racing locker never sees a stale owner.
    m_graphOwnerThread = nullptr;
    m_contextGraphMutex.unlock();
}
849
// Whether the current thread is the rendering (audio) thread.
bool AudioContext::isAudioThread() const
{
    return m_audioThread == &Thread::current();
}
854
// Whether the current thread holds the graph lock.
bool AudioContext::isGraphOwner() const
{
    return m_graphOwnerThread == &Thread::current();
}
859
// Queues a node whose finishDeref() must run later under the graph lock
// (processed by handleDeferredFinishDerefs()).
void AudioContext::addDeferredFinishDeref(AudioNode* node)
{
    ASSERT(isAudioThread());
    m_deferredFinishDerefList.append(node);
}
865
// Audio-thread hook run before each render quantum: opportunistically syncs
// graph state produced by the main thread.
void AudioContext::handlePreRenderTasks()
{
    ASSERT(isAudioThread());

    // At the beginning of every render quantum, try to update the internal rendering graph state (from main thread changes).
    // It's OK if the tryLock() fails, we'll just take slightly longer to pick up the changes.
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
884
// Audio-thread hook run after each render quantum: finishes derefs, releases
// finished sources, and hands node deletion off to the main thread.
void AudioContext::handlePostRenderTasks()
{
    ASSERT(isAudioThread());

    // Must use a tryLock() here too. Don't worry, the lock will very rarely be contended and this method is called frequently.
    // The worst that can happen is that there will be some nodes which will take slightly longer than usual to be deleted or removed
    // from the render graph (in which case they'll render silence).
    bool mustReleaseLock;
    if (tryLock(mustReleaseLock)) {
        // Take care of finishing any derefs where the tryLock() failed previously.
        handleDeferredFinishDerefs();

        // Dynamically clean up nodes which are no longer needed.
        derefFinishedSourceNodes();

        // Don't delete in the real-time thread. Let the main thread do it.
        // Ref-counted objects held by certain AudioNodes may not be thread-safe.
        scheduleNodeDeletion();

        // Fixup the state of any dirty AudioSummingJunctions and AudioNodeOutputs.
        handleDirtyAudioSummingJunctions();
        handleDirtyAudioNodeOutputs();

        updateAutomaticPullNodes();

        if (mustReleaseLock)
            unlock();
    }
}
914
// Completes finishDeref() calls queued by addDeferredFinishDeref() while the
// graph lock was unavailable.
void AudioContext::handleDeferredFinishDerefs()
{
    ASSERT(isAudioThread() && isGraphOwner());
    for (auto& node : m_deferredFinishDerefList)
        node->finishDeref(AudioNode::RefTypeConnection);

    m_deferredFinishDerefList.clear();
}
923
// Queues a fully-disconnected node for deletion. Goes straight to the
// main-thread deletion list once the audio thread is finished; otherwise to
// the marked list drained by scheduleNodeDeletion().
void AudioContext::markForDeletion(AudioNode& node)
{
    ASSERT(isGraphOwner());

    if (isAudioThreadFinished())
        m_nodesToDelete.append(&node);
    else
        m_nodesMarkedForDeletion.append(&node);

    // This is probably the best time for us to remove the node from automatic pull list,
    // since all connections are gone and we hold the graph lock. Then when handlePostRenderTasks()
    // gets a chance to schedule the deletion work, updateAutomaticPullNodes() also gets a chance to
    // modify m_renderingAutomaticPullNodes.
    removeAutomaticPullNode(node);
}
939
// Moves marked nodes to the deletion list and schedules deleteMarkedNodes()
// on the main thread (at most one schedule outstanding at a time).
void AudioContext::scheduleNodeDeletion()
{
    bool isGood = m_isInitialized && isGraphOwner();
    ASSERT(isGood);
    if (!isGood)
        return;

    // Make sure to call deleteMarkedNodes() on main thread.
    if (m_nodesMarkedForDeletion.size() && !m_isDeletionScheduled) {
        m_nodesToDelete.appendVector(m_nodesMarkedForDeletion);
        m_nodesMarkedForDeletion.clear();

        m_isDeletionScheduled = true;

        callOnMainThread([protectedThis = makeRef(*this)]() mutable {
            protectedThis->deleteMarkedNodes();
        });
    }
}
959
// Main-thread deletion of queued nodes. Purges each node's inputs/outputs
// from the dirty sets first, so the dirty-flush passes never touch freed memory.
void AudioContext::deleteMarkedNodes()
{
    ASSERT(isMainThread());

    // Protect this object from being deleted before we release the mutex locked by AutoLocker.
    Ref<AudioContext> protectedThis(*this);
    {
        AutoLocker locker(*this);

        while (m_nodesToDelete.size()) {
            AudioNode* node = m_nodesToDelete.takeLast();

            // Before deleting the node, clear out any AudioNodeInputs from m_dirtySummingJunctions.
            unsigned numberOfInputs = node->numberOfInputs();
            for (unsigned i = 0; i < numberOfInputs; ++i)
                m_dirtySummingJunctions.remove(node->input(i));

            // Before deleting the node, clear out any AudioNodeOutputs from m_dirtyAudioNodeOutputs.
            unsigned numberOfOutputs = node->numberOfOutputs();
            for (unsigned i = 0; i < numberOfOutputs; ++i)
                m_dirtyAudioNodeOutputs.remove(node->output(i));

            // Finally, delete it.
            delete node;
        }
        m_isDeletionScheduled = false;
    }
}
988
// Flags a summing junction for a rendering-state update on the next
// pre/post-render pass; caller must hold the graph lock.
void AudioContext::markSummingJunctionDirty(AudioSummingJunction* summingJunction)
{
    ASSERT(isGraphOwner());
    m_dirtySummingJunctions.add(summingJunction);
}
994
// Removes a junction from the dirty set (e.g. before it is destroyed);
// main-thread only, takes the graph lock itself.
void AudioContext::removeMarkedSummingJunction(AudioSummingJunction* summingJunction)
{
    ASSERT(isMainThread());
    AutoLocker locker(*this);
    m_dirtySummingJunctions.remove(summingJunction);
}
1001
// Flags a node output for a rendering-state update on the next
// pre/post-render pass; caller must hold the graph lock.
void AudioContext::markAudioNodeOutputDirty(AudioNodeOutput* output)
{
    ASSERT(isGraphOwner());
    m_dirtyAudioNodeOutputs.add(output);
}
1007
// Flushes deferred rendering-state updates for summing junctions marked via
// markSummingJunctionDirty(). Runs with the graph lock held; the set is
// cleared only after every junction has been updated.
void AudioContext::handleDirtyAudioSummingJunctions()
{
    ASSERT(isGraphOwner());

    for (auto& junction : m_dirtySummingJunctions)
        junction->updateRenderingState();

    m_dirtySummingJunctions.clear();
}
1017
// Flushes deferred rendering-state updates for outputs marked via
// markAudioNodeOutputDirty(). Runs with the graph lock held; the set is
// cleared only after every output has been updated.
void AudioContext::handleDirtyAudioNodeOutputs()
{
    ASSERT(isGraphOwner());

    for (auto& output : m_dirtyAudioNodeOutputs)
        output->updateRenderingState();

    m_dirtyAudioNodeOutputs.clear();
}
1027
1028void AudioContext::addAutomaticPullNode(AudioNode& node)
1029{
1030 ASSERT(isGraphOwner());
1031
1032 if (m_automaticPullNodes.add(&node).isNewEntry)
1033 m_automaticPullNodesNeedUpdating = true;
1034}
1035
1036void AudioContext::removeAutomaticPullNode(AudioNode& node)
1037{
1038 ASSERT(isGraphOwner());
1039
1040 if (m_automaticPullNodes.remove(&node))
1041 m_automaticPullNodesNeedUpdating = true;
1042}
1043
1044void AudioContext::updateAutomaticPullNodes()
1045{
1046 ASSERT(isGraphOwner());
1047
1048 if (m_automaticPullNodesNeedUpdating) {
1049 // Copy from m_automaticPullNodes to m_renderingAutomaticPullNodes.
1050 m_renderingAutomaticPullNodes.resize(m_automaticPullNodes.size());
1051
1052 unsigned i = 0;
1053 for (auto& output : m_automaticPullNodes)
1054 m_renderingAutomaticPullNodes[i++] = output;
1055
1056 m_automaticPullNodesNeedUpdating = false;
1057 }
1058}
1059
1060void AudioContext::processAutomaticPullNodes(size_t framesToProcess)
1061{
1062 ASSERT(isAudioThread());
1063
1064 for (auto& node : m_renderingAutomaticPullNodes)
1065 node->processIfNecessary(framesToProcess);
1066}
1067
// Explicitly delegates to ActiveDOMObject's accessor to disambiguate among
// the base classes AudioContext inherits this method from.
ScriptExecutionContext* AudioContext::scriptExecutionContext() const
{
    return ActiveDOMObject::scriptExecutionContext();
}
1072
1073void AudioContext::nodeWillBeginPlayback()
1074{
1075 // Called by scheduled AudioNodes when clients schedule their start times.
1076 // Prior to the introduction of suspend(), resume(), and stop(), starting
1077 // a scheduled AudioNode would remove the user-gesture restriction, if present,
1078 // and would thus unmute the context. Now that AudioContext stays in the
1079 // "suspended" state if a user-gesture restriction is present, starting a
1080 // schedule AudioNode should set the state to "running", but only if the
1081 // user-gesture restriction is set.
1082 if (userGestureRequiredForAudioStart())
1083 startRendering();
1084}
1085
// Gatekeeper for starting/resuming rendering: returns true only when every
// behavior restriction is satisfied and the platform media session permits
// playback to begin. Restrictions are removed permanently once satisfied.
bool AudioContext::willBeginPlayback()
{
    if (!document())
        return false;

    if (userGestureRequiredForAudioStart()) {
        // A capture session counts as implicit permission, like a user gesture.
        if (!processingUserGestureForMedia() && !document()->isCapturing()) {
            ALWAYS_LOG(LOGIDENTIFIER, "returning false, not processing user gesture or capturing");
            return false;
        }
        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
    }

    if (pageConsentRequiredForAudioStart()) {
        Page* page = document()->page();
        if (page && !page->canStartMedia()) {
            // Register so mediaCanStart() retries automatically once the page
            // allows media to start.
            document()->addMediaCanStartListener(*this);
            ALWAYS_LOG(LOGIDENTIFIER, "returning false, page doesn't allow media to start");
            return false;
        }
        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    }

    // Final say belongs to the platform media session (interruptions, etc.).
    auto willBegin = m_mediaSession->clientWillBeginPlayback();
    ALWAYS_LOG(LOGIDENTIFIER, "returning ", willBegin);

    return willBegin;
}
1114
// Gatekeeper for suspending rendering; mirrors willBeginPlayback()'s
// restriction handling before consulting the platform media session.
// NOTE(review): requiring a user gesture / page consent in order to *pause*
// looks intentional (mirrors the begin path) but is surprising — verify
// against the media session design.
bool AudioContext::willPausePlayback()
{
    if (!document())
        return false;

    if (userGestureRequiredForAudioStart()) {
        if (!processingUserGestureForMedia())
            return false;
        removeBehaviorRestriction(AudioContext::RequireUserGestureForAudioStartRestriction);
    }

    if (pageConsentRequiredForAudioStart()) {
        Page* page = document()->page();
        if (page && !page->canStartMedia()) {
            // Retry automatically via mediaCanStart() when the page later allows media.
            document()->addMediaCanStartListener(*this);
            return false;
        }
        removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    }

    return m_mediaSession->clientWillPausePlayback();
}
1137
1138void AudioContext::startRendering()
1139{
1140 ALWAYS_LOG(LOGIDENTIFIER);
1141 if (m_isStopScheduled || !willBeginPlayback())
1142 return;
1143
1144 destination()->startRendering();
1145 setState(State::Running);
1146}
1147
// MediaCanStartListener callback, registered in willBeginPlayback() /
// willPausePlayback(): the page now permits media, so lift the consent
// restriction and attempt to resume.
void AudioContext::mediaCanStart(Document& document)
{
    ASSERT_UNUSED(document, &document == this->document());
    removeBehaviorRestriction(AudioContext::RequirePageConsentForAudioStartRestriction);
    mayResumePlayback(true);
}
1154
1155MediaProducer::MediaStateFlags AudioContext::mediaState() const
1156{
1157 if (!m_isStopScheduled && m_destinationNode && m_destinationNode->isPlayingAudio())
1158 return MediaProducer::IsPlayingAudio;
1159
1160 return MediaProducer::IsNotPlaying;
1161}
1162
1163void AudioContext::pageMutedStateDidChange()
1164{
1165 if (m_destinationNode && document() && document()->page())
1166 m_destinationNode->setMuted(document()->page()->isAudioMuted());
1167}
1168
// Notifies the document that this context's audio-playing state changed so
// the page-level media state can be recomputed.
void AudioContext::isPlayingAudioDidChange()
{
    // Make sure to call Document::updateIsPlayingMedia() on the main thread, since
    // we could be on the audio I/O thread here and the call into WebCore could block.
    // The ref held by the lambda keeps this context alive across the thread hop.
    callOnMainThread([protectedThis = makeRef(*this)] {
        if (protectedThis->document())
            protectedThis->document()->updateIsPlayingMedia();
    });
}
1178
// Offline rendering has finished: transition to Closed and dispatch an
// OfflineAudioCompletionEvent carrying the rendered buffer.
void AudioContext::fireCompletionEvent()
{
    ASSERT(isMainThread());
    // Release-build guard mirroring the assertion above: dispatching DOM
    // events off the main thread would be unsafe.
    if (!isMainThread())
        return;

    ALWAYS_LOG(LOGIDENTIFIER);

    AudioBuffer* renderedBuffer = m_renderTarget.get();
    // State changes before the event fires, so listeners observe Closed.
    setState(State::Closed);

    ASSERT(renderedBuffer);
    if (!renderedBuffer)
        return;

    // Avoid firing the event if the document has already gone away.
    if (!m_isStopScheduled) {
        // Call the offline rendering completion event listener.
        m_eventQueue->enqueueEvent(OfflineAudioCompletionEvent::create(renderedBuffer));
    }
}
1200
1201void AudioContext::incrementActiveSourceCount()
1202{
1203 ++m_activeSourceCount;
1204}
1205
1206void AudioContext::decrementActiveSourceCount()
1207{
1208 --m_activeSourceCount;
1209}
1210
// Implements AudioContext.suspend(). Offline contexts and contexts being
// torn down reject with InvalidStateError; an already-suspended context
// resolves immediately; Closed/Interrupted (or missing destination) rejects.
void AudioContext::suspend(DOMPromiseDeferred<void>&& promise)
{
    if (isOfflineContext() || m_isStopScheduled) {
        promise.reject(InvalidStateError);
        return;
    }

    if (m_state == State::Suspended) {
        promise.resolve();
        return;
    }

    if (m_state == State::Closed || m_state == State::Interrupted || !m_destinationNode) {
        promise.reject();
        return;
    }

    // Queue the promise first; presumably it is settled when setState()
    // actually transitions to Suspended — TODO confirm in addReaction().
    addReaction(State::Suspended, WTFMove(promise));

    // If the media session refuses to pause right now, the queued promise
    // stays pending until the state eventually changes.
    if (!willPausePlayback())
        return;

    lazyInitialize();

    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
        setState(State::Suspended);
    });
}
1239
// Implements AudioContext.resume(). Offline contexts and contexts being
// torn down reject with InvalidStateError; an already-running context
// resolves immediately; Closed (or missing destination) rejects.
void AudioContext::resume(DOMPromiseDeferred<void>&& promise)
{
    if (isOfflineContext() || m_isStopScheduled) {
        promise.reject(InvalidStateError);
        return;
    }

    if (m_state == State::Running) {
        promise.resolve();
        return;
    }

    if (m_state == State::Closed || !m_destinationNode) {
        promise.reject();
        return;
    }

    // Queue the promise first; presumably it is settled when setState()
    // actually transitions to Running — TODO confirm in addReaction().
    addReaction(State::Running, WTFMove(promise));

    // Playback restrictions (user gesture, page consent, media session) may
    // block starting now; the queued promise then stays pending.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
        setState(State::Running);
    });
}
1268
// Implements AudioContext.close(). Closing is terminal: the destination is
// shut down and the context is uninitialized once the close completes.
// Closing an already-closed context resolves (per the Web Audio spec).
void AudioContext::close(DOMPromiseDeferred<void>&& promise)
{
    if (isOfflineContext() || m_isStopScheduled) {
        promise.reject(InvalidStateError);
        return;
    }

    if (m_state == State::Closed || !m_destinationNode) {
        promise.resolve();
        return;
    }

    // Queue the promise; presumably settled when the state reaches Closed.
    addReaction(State::Closed, WTFMove(promise));

    lazyInitialize();

    m_destinationNode->close([this, protectedThis = makeRef(*this)] {
        setState(State::Closed);
        uninitialize();
    });
}
1290
1291
// Platform-driven suspension (e.g. a media-session interruption), as opposed
// to the JS-visible suspend() above.
void AudioContext::suspendPlayback()
{
    if (!m_destinationNode || m_state == State::Closed)
        return;

    if (m_state == State::Suspended) {
        // Already suspended: just reflect an interruption in the state, if any.
        if (m_mediaSession->state() == PlatformMediaSession::Interrupted)
            setState(State::Interrupted);
        return;
    }

    lazyInitialize();

    m_destinationNode->suspend([this, protectedThis = makeRef(*this)] {
        // Re-check the session on completion: an interruption may have
        // arrived while the destination was suspending.
        bool interrupted = m_mediaSession->state() == PlatformMediaSession::Interrupted;
        setState(interrupted ? State::Interrupted : State::Suspended);
    });
}
1310
// Called when a condition blocking playback may have cleared (e.g. the page
// now allows media). shouldResume == false only records the Suspended state.
void AudioContext::mayResumePlayback(bool shouldResume)
{
    if (!m_destinationNode || m_state == State::Closed || m_state == State::Running)
        return;

    if (!shouldResume) {
        setState(State::Suspended);
        return;
    }

    // Re-check remaining restrictions; if still blocked, leave state as-is.
    if (!willBeginPlayback())
        return;

    lazyInitialize();

    m_destinationNode->resume([this, protectedThis = makeRef(*this)] {
        setState(State::Running);
    });
}
1330
1331void AudioContext::postTask(WTF::Function<void()>&& task)
1332{
1333 if (m_isStopScheduled)
1334 return;
1335
1336 m_scriptExecutionContext->postTask(WTFMove(task));
1337}
1338
1339const SecurityOrigin* AudioContext::origin() const
1340{
1341 return m_scriptExecutionContext ? m_scriptExecutionContext->securityOrigin() : nullptr;
1342}
1343
1344void AudioContext::addConsoleMessage(MessageSource source, MessageLevel level, const String& message)
1345{
1346 if (m_scriptExecutionContext)
1347 m_scriptExecutionContext->addConsoleMessage(source, level, message);
1348}
1349
#if !RELEASE_LOG_DISABLED
// Routes this object's release logging to the Media channel.
WTFLogChannel& AudioContext::logChannel() const
{
    return LogMedia;
}
#endif
1356
1357} // namespace WebCore
1358
1359#endif // ENABLE(WEB_AUDIO)
1360