Jamoma API  0.6.0.a19
AUEffectBase.cpp
1 /* Copyright © 2007 Apple Inc. All Rights Reserved.
2 
3  Disclaimer: IMPORTANT: This Apple software is supplied to you by
4  Apple Inc. ("Apple") in consideration of your agreement to the
5  following terms, and your use, installation, modification or
6  redistribution of this Apple software constitutes acceptance of these
7  terms. If you do not agree with these terms, please do not use,
8  install, modify or redistribute this Apple software.
9 
10  In consideration of your agreement to abide by the following terms, and
11  subject to these terms, Apple grants you a personal, non-exclusive
12  license, under Apple's copyrights in this original Apple software (the
13  "Apple Software"), to use, reproduce, modify and redistribute the Apple
14  Software, with or without modifications, in source and/or binary forms;
15  provided that if you redistribute the Apple Software in its entirety and
16  without modifications, you must retain this notice and the following
17  text and disclaimers in all such redistributions of the Apple Software.
18  Neither the name, trademarks, service marks or logos of Apple Inc.
19  may be used to endorse or promote products derived from the Apple
20  Software without specific prior written permission from Apple. Except
21  as expressly stated in this notice, no other rights or licenses, express
22  or implied, are granted by Apple herein, including but not limited to
23  any patent rights that may be infringed by your derivative works or by
24  other works in which the Apple Software may be incorporated.
25 
26  The Apple Software is provided by Apple on an "AS IS" basis. APPLE
27  MAKES NO WARRANTIES, EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION
28  THE IMPLIED WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY AND FITNESS
29  FOR A PARTICULAR PURPOSE, REGARDING THE APPLE SOFTWARE OR ITS USE AND
30  OPERATION ALONE OR IN COMBINATION WITH YOUR PRODUCTS.
31 
32  IN NO EVENT SHALL APPLE BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL
33  OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
35  INTERRUPTION) ARISING IN ANY WAY OUT OF THE USE, REPRODUCTION,
36  MODIFICATION AND/OR DISTRIBUTION OF THE APPLE SOFTWARE, HOWEVER CAUSED
37  AND WHETHER UNDER THEORY OF CONTRACT, TORT (INCLUDING NEGLIGENCE),
38  STRICT LIABILITY OR OTHERWISE, EVEN IF APPLE HAS BEEN ADVISED OF THE
39  POSSIBILITY OF SUCH DAMAGE.
40 */
41 /*=============================================================================
42  AUEffectBase.cpp
43 
44 =============================================================================*/
45 
46 #include "AUEffectBase.h"
47 
/*
	This class does not deal as well as it should with N-M effects...

	The problem areas (when the input and output channel counts do not match) are:
	ProcessInPlace - there will be problems if InputChan != OutputChan
	Bypass - it just passes the buffers through when not processing them

	This will be fixed in a future update...
*/
57 
58 //_____________________________________________________________________________
59 //
// Construct an effect unit with exactly one input bus and one output bus.
// inProcessesInPlace selects whether the output may alias the input buffer
// during rendering; it can be toggled later through
// kAudioUnitProperty_InPlaceProcessing (see SetProperty).
AUEffectBase::AUEffectBase(	AudioUnit	audioUnit,
							bool		inProcessesInPlace ) :
	AUBase(audioUnit, 1, 1),		// 1 in bus, 1 out bus
	mBypassEffect(false),
	mParamSRDep (false),
	mProcessesInPlace(inProcessesInPlace)
{
}
68 
69 //_____________________________________________________________________________
70 //
AUEffectBase::~AUEffectBase()
{
	// Free all per-channel DSP kernels (Cleanup is safe to call repeatedly).
	Cleanup();
}
75 
76 //_____________________________________________________________________________
77 //
78 void AUEffectBase::Cleanup()
79 {
80  for (KernelList::iterator it = mKernelList.begin(); it != mKernelList.end(); ++it)
81  delete *it;
82 
83  mKernelList.clear();
84 }
85 
86 
87 //_____________________________________________________________________________
88 //
// Validate the currently-configured input/output channel counts against the
// channel configurations the subclass publishes via SupportedNumChannels(),
// then create one DSP kernel per channel (MaintainKernels).
// Returns kAudioUnitErr_FormatNotSupported when no published (or implied)
// configuration matches the current stream formats.
ComponentResult AUEffectBase::Initialize()
{
	// get our current numChannels for input and output
	SInt16 auNumInputs = (SInt16) GetInput(0)->GetStreamFormat().mChannelsPerFrame;
	SInt16 auNumOutputs = (SInt16) GetOutput(0)->GetStreamFormat().mChannelsPerFrame;

	// does the unit publish specific information about channel configurations?
	const AUChannelInfo *auChannelConfigs = NULL;
	UInt32 numIOconfigs = SupportedNumChannels(&auChannelConfigs);

	if ((numIOconfigs > 0) && (auChannelConfigs != NULL))
	{
		bool foundMatch = false;
		for (UInt32 i = 0; (i < numIOconfigs) && !foundMatch; ++i)
		{
			SInt16 configNumInputs = auChannelConfigs[i].inChannels;
			SInt16 configNumOutputs = auChannelConfigs[i].outChannels;
			if ((configNumInputs < 0) && (configNumOutputs < 0))
			{
				// negative counts are wildcards:
				// {-1,-2} or {-2,-1}: unit accepts any number of channels on input and output
				if (((configNumInputs == -1) && (configNumOutputs == -2))
					|| ((configNumInputs == -2) && (configNumOutputs == -1)))
				{
					foundMatch = true;
				}
				// {-1,-1}: unit accepts any number of channels on input and output
				// IFF they are the same number on both scopes
				else if (((configNumInputs == -1) && (configNumOutputs == -1)) && (auNumInputs == auNumOutputs))
				{
					foundMatch = true;
				}
				else
					continue;
			}
			else
			{
				// the -1 case on either scope is saying that the unit doesn't care about the
				// number of channels on that scope
				bool inputMatch = (auNumInputs == configNumInputs) || (configNumInputs == -1);
				bool outputMatch = (auNumOutputs == configNumOutputs) || (configNumOutputs == -1);
				if (inputMatch && outputMatch)
					foundMatch = true;
			}
		}
		if (!foundMatch)
			return kAudioUnitErr_FormatNotSupported;
	}
	else
	{
		// there is no specifically published channel info
		// so for those kinds of effects, the assumption is that the channels
		// (whatever their number) should match on both scopes
		if ((auNumOutputs != auNumInputs) || (auNumOutputs == 0))
		{
			return kAudioUnitErr_FormatNotSupported;
		}
	}

	// allocate/trim to exactly one kernel per channel
	MaintainKernels();
	return noErr;
}
150 
151 ComponentResult AUEffectBase::Reset( AudioUnitScope inScope,
152  AudioUnitElement inElement)
153 {
154  for (KernelList::iterator it = mKernelList.begin(); it != mKernelList.end(); ++it) {
155  AUKernelBase *kernel = *it;
156  if (kernel != NULL)
157  kernel->Reset();
158  }
159 
160  return noErr;
161 }
162 
163 ComponentResult AUEffectBase::GetPropertyInfo (AudioUnitPropertyID inID,
164  AudioUnitScope inScope,
165  AudioUnitElement inElement,
166  UInt32 & outDataSize,
167  Boolean & outWritable)
168 {
169  if (inScope == kAudioUnitScope_Global) {
170  switch (inID) {
171  case kAudioUnitProperty_BypassEffect:
172  outWritable = true;
173  outDataSize = sizeof (UInt32);
174  return noErr;
175  case kAudioUnitProperty_InPlaceProcessing:
176  outWritable = true;
177  outDataSize = sizeof (UInt32);
178  return noErr;
179  }
180  }
181  return AUBase::GetPropertyInfo (inID, inScope, inElement, outDataSize, outWritable);
182 }
183 
184 
185 ComponentResult AUEffectBase::GetProperty (AudioUnitPropertyID inID,
186  AudioUnitScope inScope,
187  AudioUnitElement inElement,
188  void * outData)
189 {
190  if (inScope == kAudioUnitScope_Global) {
191  switch (inID) {
192  case kAudioUnitProperty_BypassEffect:
193  *((UInt32*)outData) = (IsBypassEffect() ? 1 : 0);
194  return noErr;
195  case kAudioUnitProperty_InPlaceProcessing:
196  *((UInt32*)outData) = (mProcessesInPlace ? 1 : 0);
197  return noErr;
198  }
199  }
200  return AUBase::GetProperty (inID, inScope, inElement, outData);
201 }
202 
203 
204 ComponentResult AUEffectBase::SetProperty( AudioUnitPropertyID inID,
205  AudioUnitScope inScope,
206  AudioUnitElement inElement,
207  const void * inData,
208  UInt32 inDataSize)
209 {
210  if (inScope == kAudioUnitScope_Global) {
211  switch (inID) {
212  case kAudioUnitProperty_BypassEffect:
213  {
214  if (inDataSize < sizeof(UInt32))
215  return kAudioUnitErr_InvalidPropertyValue;
216 
217  bool tempNewSetting = *((UInt32*)inData) != 0;
218  // we're changing the state of bypass
219  if (tempNewSetting != IsBypassEffect())
220  {
221  if (!tempNewSetting && IsBypassEffect() && IsInitialized()) // turning bypass off and we're initialized
222  Reset(0, 0);
223  SetBypassEffect (tempNewSetting);
224  }
225  return noErr;
226  }
227  case kAudioUnitProperty_InPlaceProcessing:
228  mProcessesInPlace = (*((UInt32*)inData) != 0);
229  return noErr;
230  }
231  }
232  return AUBase::SetProperty (inID, inScope, inElement, inData, inDataSize);
233 }
234 
235 
236 void AUEffectBase::MaintainKernels()
237 {
238  UInt32 nChannels = GetNumberOfChannels();
239 
240  if (mKernelList.size() < nChannels) {
241  mKernelList.reserve(nChannels);
242  for (UInt32 i = mKernelList.size(); i < nChannels; ++i)
243  mKernelList.push_back(NewKernel());
244  } else
245  while (mKernelList.size() > nChannels) {
246  AUKernelBase *kernel = mKernelList.back();
247  delete kernel;
248  mKernelList.pop_back();
249  }
250 
251  for(unsigned int i = 0; i < nChannels; i++ )
252  {
253  if(mKernelList[i]) {
254  mKernelList[i]->SetLastKernel(i == nChannels-1 );
255  mKernelList[i]->SetChannelNum (i);
256  }
257  }
258 }
259 
260 bool AUEffectBase::StreamFormatWritable( AudioUnitScope scope,
261  AudioUnitElement element)
262 {
263  return IsInitialized() ? false : true;
264 }
265 
// Forward a stream-format change to AUBase; on success, and when a subclass
// has declared a sample-rate dependency, notify listeners that the parameter
// list may have changed if the sample rate actually differs.
ComponentResult AUEffectBase::ChangeStreamFormat(	AudioUnitScope				inScope,
													AudioUnitElement			inElement,
													const CAStreamBasicDescription & inPrevFormat,
													const CAStreamBasicDescription & inNewFormat)
{
	ComponentResult result = AUBase::ChangeStreamFormat(inScope, inElement, inPrevFormat, inNewFormat);
	if (result == noErr)
	{
		// for the moment the only dependency we know about
		// where a parameter's range may change is with the sample rate
		// and effects are only publishing parameters in the global scope!
		if (GetParamHasSampleRateDependency() && fnotequal(inPrevFormat.mSampleRate, inNewFormat.mSampleRate))
			PropertyChanged(kAudioUnitProperty_ParameterList, kAudioUnitScope_Global, 0);
	}

	return result;
}
283 
284 
285 // ____________________________________________________________________________
286 //
287 // This method is called (potentially repeatedly) by ProcessForScheduledParams()
288 // in order to perform the actual DSP required for this portion of the entire buffer
289 // being processed. The entire buffer can be divided up into smaller "slices"
290 // according to the timestamps on the scheduled parameters...
291 //
// Process one time-slice of the render buffer. Called (possibly repeatedly)
// by ProcessForScheduledParams(): the buffer lists inside inUserData are
// resized to the slice length, processed, and their mData pointers advanced
// past the consumed frames. Render() restores the pointers/sizes after the
// whole buffer has been consumed.
ComponentResult AUEffectBase::ProcessScheduledSlice(	void				*inUserData,
														UInt32				inStartFrameInBuffer,
														UInt32				inSliceFramesToProcess,
														UInt32				inTotalBufferFrames )
{
	// inUserData carries the render flags plus the mutable in/out buffer lists
	ScheduledProcessParams &sliceParams = *((ScheduledProcessParams*)inUserData);

	AudioUnitRenderActionFlags &actionFlags = *sliceParams.actionFlags;
	AudioBufferList &inputBufferList = *sliceParams.inputBufferList;
	AudioBufferList &outputBufferList = *sliceParams.outputBufferList;

	// fix the size of the buffer we're operating on before we render this slice of time
	for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
		inputBufferList.mBuffers[i].mDataByteSize =
			(inputBufferList.mBuffers[i].mNumberChannels * inSliceFramesToProcess * sizeof(AudioSampleType));
	}

	for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
		outputBufferList.mBuffers[i].mDataByteSize =
			(outputBufferList.mBuffers[i].mNumberChannels * inSliceFramesToProcess * sizeof(AudioSampleType));
	}
	// process the buffer
	ComponentResult result = ProcessBufferLists(actionFlags, inputBufferList, outputBufferList, inSliceFramesToProcess );

	// we just partially processed the buffers, so increment the data pointers
	// to the next part of the buffer to process
	for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
		inputBufferList.mBuffers[i].mData =
			(AudioSampleType *)inputBufferList.mBuffers[i].mData + inputBufferList.mBuffers[i].mNumberChannels * inSliceFramesToProcess;
	}

	for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
		outputBufferList.mBuffers[i].mData =
			(AudioSampleType *)outputBufferList.mBuffers[i].mData + outputBufferList.mBuffers[i].mNumberChannels * inSliceFramesToProcess;
	}

	return result;
}
329 
330 // ____________________________________________________________________________
331 //
332 
// Pull input, then either bypass (pass/copy input through) or run the DSP.
// When processing in place, the output element's buffer list aliases the
// input's, so no copying occurs. If parameter changes are scheduled
// (mParamList non-empty), the buffer is processed in time-slices via
// ProcessForScheduledParams()/ProcessScheduledSlice().
ComponentResult 	AUEffectBase::Render(	AudioUnitRenderActionFlags &ioActionFlags,
											const AudioTimeStamp &		inTimeStamp,
											UInt32						nFrames)
{
	if (!HasInput(0))
		return kAudioUnitErr_NoConnection;

	ComponentResult result = noErr;
	AUOutputElement *theOutput = GetOutput(0);	// throws if error

	AUInputElement *theInput = GetInput(0);
	result = theInput->PullInput(ioActionFlags, inTimeStamp, 0 /* element */, nFrames);

	if (result == noErr)
	{
		if(ProcessesInPlace() )
		{
			// output shares the input's buffers; no copy needed
			theOutput->SetBufferList(theInput->GetBufferList() );
		}

		if (ShouldBypassEffect())
		{
			// leave silence bit alone

			if(!ProcessesInPlace() )
			{
				// bypass with distinct buffers: pass the input straight through
				theInput->CopyBufferContentsTo (theOutput->GetBufferList());
			}
		}
		else
		{
			if(mParamList.size() == 0 )
			{
				// no scheduled parameters: process the whole buffer in one call
				// this will read/write silence bit
				result = ProcessBufferLists(ioActionFlags, theInput->GetBufferList(), theOutput->GetBufferList(), nFrames);
			}
			else
			{
				// deal with scheduled parameters...

				AudioBufferList &inputBufferList = theInput->GetBufferList();
				AudioBufferList &outputBufferList = theOutput->GetBufferList();

				ScheduledProcessParams processParams;
				processParams.actionFlags = &ioActionFlags;
				processParams.inputBufferList = &inputBufferList;
				processParams.outputBufferList = &outputBufferList;

				// divide up the buffer into slices according to scheduled params then
				// do the DSP for each slice (ProcessScheduledSlice() called for each slice)
				result = ProcessForScheduledParams(	mParamList,
													nFrames,
													&processParams );


				// fixup the buffer pointers to how they were before we started:
				// ProcessScheduledSlice() advanced mData past each slice and
				// shrank mDataByteSize to the slice size
				for(unsigned int i = 0; i < inputBufferList.mNumberBuffers; i++ ) {
					inputBufferList.mBuffers[i].mData =
						(AudioSampleType *)inputBufferList.mBuffers[i].mData - inputBufferList.mBuffers[i].mNumberChannels * nFrames;
					inputBufferList.mBuffers[i].mDataByteSize =
						(inputBufferList.mBuffers[i].mNumberChannels * nFrames * sizeof(AudioSampleType));
				}

				for(unsigned int i = 0; i < outputBufferList.mNumberBuffers; i++ ) {
					outputBufferList.mBuffers[i].mData =
						(AudioSampleType *)outputBufferList.mBuffers[i].mData - outputBufferList.mBuffers[i].mNumberChannels * nFrames;
					outputBufferList.mBuffers[i].mDataByteSize =
						(outputBufferList.mBuffers[i].mNumberChannels * nFrames * sizeof(AudioSampleType));
				}
			}
		}

		if ( (ioActionFlags & kAudioUnitRenderAction_OutputIsSilence) && !ProcessesInPlace() )
		{
			// kernels reported silence: make the separate output buffer
			// actually contain zeros
			AUBufferList::ZeroBuffer(theOutput->GetBufferList() );
		}
	}

	return result;
}
413 
// Default per-channel processing: routes each channel through the matching
// AUKernelBase in mKernelList. Handles both a single interleaved buffer and
// one-buffer-per-channel (deinterleaved) layouts. The OutputIsSilence flag
// is set optimistically up front and cleared as soon as any kernel reports a
// non-silent result.
OSStatus	AUEffectBase::ProcessBufferLists(
									AudioUnitRenderActionFlags &	ioActionFlags,
									const AudioBufferList &			inBuffer,
									AudioBufferList &				outBuffer,
									UInt32							inFramesToProcess )
{
	bool ioSilence;	// per-kernel in/out silence flag, reset before each Process() call

	bool silentInput = IsInputSilent (ioActionFlags, inFramesToProcess);
	// assume silence until a kernel proves otherwise
	ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;


	// call the kernels to handle either interleaved or deinterleaved
	if (inBuffer.mNumberBuffers == 1) {
		// interleaved (or mono)
		int channel = 0;

		for (KernelList::iterator it = mKernelList.begin(); it != mKernelList.end(); ++it, ++channel) {
			AUKernelBase *kernel = *it;

			if (kernel != NULL) {
				ioSilence = silentInput;

				// process each interleaved channel individually
				// (stride == total interleaved channel count)
				kernel->Process(
					(const AudioSampleType *)inBuffer.mBuffers[0].mData + channel,
					(AudioSampleType *)outBuffer.mBuffers[0].mData + channel,
					inFramesToProcess,
					inBuffer.mBuffers[0].mNumberChannels,
					ioSilence);

				if (!ioSilence)
					ioActionFlags &= ~kAudioUnitRenderAction_OutputIsSilence;
			}
		}
	} else {
		// deinterleaved: one buffer per channel, unit stride
		const AudioBuffer *srcBuffer = inBuffer.mBuffers;
		AudioBuffer *destBuffer = outBuffer.mBuffers;

		for (KernelList::iterator it = mKernelList.begin(); it != mKernelList.end();
		++it, ++srcBuffer, ++destBuffer) {
			AUKernelBase *kernel = *it;

			if (kernel != NULL) {
				ioSilence = silentInput;

				kernel->Process(
					(const AudioSampleType *)srcBuffer->mData,
					(AudioSampleType *)destBuffer->mData,
					inFramesToProcess,
					1,
					ioSilence);

				if (!ioSilence)
					ioActionFlags &= ~kAudioUnitRenderAction_OutputIsSilence;
			}
		}
	}

	return noErr;
}
476 
477 Float64 AUEffectBase::GetSampleRate()
478 {
479  return GetOutput(0)->GetStreamFormat().mSampleRate;
480 }
481 
482 UInt32 AUEffectBase::GetNumberOfChannels()
483 {
484  return GetOutput(0)->GetStreamFormat().mChannelsPerFrame;
485 }
486