我們的音頻應用程序使用的AUGraph包含一個混音器單元、一個轉換器單元和一個輸出單元。這是一個實時應用程序,因此性能是我們高度關注的問題。Apple CoreAudio——爲什麼AUGraphStop()需要25毫秒才能完成?
我們發現AUGraphStop()在主線程上需要25毫秒才能完成,我們的分析器顯示這段時間都花在睡眠上。誰能解釋爲什麼會發生這種情況?它是在等待下一個過零點,還是在等待再渲染一個緩衝區?我已經嘗試了幾種解決方法,包括先發送一幀靜音的渲染幀(並設置kAudioUnitRenderAction_OutputIsSilence標誌)然後再嘗試停止,以及在kAudioUnitRenderAction_PostRender通知回調內部調用AUGraphStop()(這樣做的結果時好時壞,而且在查閱資料後,這似乎不是一個推薦的方法)。
我試過降低每個切片的幀數,但似乎沒有什麼作用。我也試過移除除輸出節點之外的所有音頻單元,以便把問題縮小到某個特定的單元,但AUGraphStop()的耗時仍維持在25毫秒。
下面是我們如何在初始化圖形:
//Build the AUGraph: converter -> matrix mixer -> default output.
//Call order matters for CoreAudio: NewAUGraph -> AddNode -> Connect -> Open
//(instantiates units) -> configure units -> Initialize.

//Configure converter/mixer/output unit descriptors.
//Aggregate-init sets componentFlags/componentFlagsMask to 0, as required.
AudioComponentDescription OutputUnitDesc = { kAudioUnitType_Output ,kAudioUnitSubType_DefaultOutput, kAudioUnitManufacturer_Apple };
AudioComponentDescription MixerUnitDesc = { kAudioUnitType_Mixer, kAudioUnitSubType_MatrixMixer, kAudioUnitManufacturer_Apple };
AudioComponentDescription ConverterUnitDesc = { kAudioUnitType_FormatConverter, kAudioUnitSubType_AUConverter, kAudioUnitManufacturer_Apple };
//Create graph
CA_ERR(NewAUGraph(&mAudioGraph));
//Add converter/mixer/output nodes
CA_ERR(AUGraphAddNode(mAudioGraph, &OutputUnitDesc, &mOutputNode));
CA_ERR(AUGraphAddNode(mAudioGraph, &MixerUnitDesc, &mMixerNode));
CA_ERR(AUGraphAddNode(mAudioGraph, &ConverterUnitDesc, &mConverterNode));
//Connect nodes: converter output bus 0 -> mixer input bus 0 -> output input bus 0
CA_ERR(AUGraphConnectNodeInput(mAudioGraph, mConverterNode, 0, mMixerNode, 0));
CA_ERR(AUGraphConnectNodeInput(mAudioGraph, mMixerNode, 0, mOutputNode, 0));
//Open the graph (instantiates the units; must precede AUGraphNodeInfo)
CA_ERR(AUGraphOpen(mAudioGraph));
//Get the created units
CA_ERR(AUGraphNodeInfo(mAudioGraph, mOutputNode, NULL, &mOutputUnit));
CA_ERR(AUGraphNodeInfo(mAudioGraph, mMixerNode, NULL, &mMixerUnit));
CA_ERR(AUGraphNodeInfo(mAudioGraph, mConverterNode, NULL, &mConverterUnit));
//Setup stream format description from the source file's (WAV-style) header:
//packed signed-integer linear PCM, interleaved, one frame per packet.
mStreamDesc.mFormatID = kAudioFormatLinearPCM;
mStreamDesc.mFormatFlags = kLinearPCMFormatFlagIsPacked | kAudioFormatFlagIsSignedInteger;
mStreamDesc.mChannelsPerFrame = Header->nChannels;
mStreamDesc.mSampleRate = (Float64)Header->nSamplesPerSec;
mStreamDesc.mBitsPerChannel = Header->wBitsPerSample;
mStreamDesc.mBytesPerFrame = (Header->wBitsPerSample >> 3) * Header->nChannels;
mStreamDesc.mFramesPerPacket = 1;
mStreamDesc.mBytesPerPacket = mStreamDesc.mBytesPerFrame * mStreamDesc.mFramesPerPacket;
mStreamDesc.mReserved = 0;
//Set data endianness according to file type - TODO: Get endianness from header
AudioSystem::FileType FileType = mSample->GetFiletype();
if(FileType == AudioSystem::WAV)
    mStreamDesc.mFormatFlags |= kAudioFormatFlagsNativeEndian;
else if(FileType == AudioSystem::OGG)
    //NOTE(review): Ogg Vorbis decoders normally produce native-endian PCM;
    //confirm big-endian is really intended here rather than NativeEndian.
    mStreamDesc.mFormatFlags |= kLinearPCMFormatFlagIsBigEndian;
//Configure number of input/output busses for mixer unit.
//kAudioUnitProperty_BusCount takes a UInt32; passing an int* with sizeof(u32)
//only worked by coincidence of type sizes, so use UInt32 explicitly.
UInt32 NumChannelsIn = (UInt32)Header->nChannels;
UInt32 NumChannelsOut = (UInt32)AudioSystem::GetOutputChannelConfig();
CA_ERR(AudioUnitSetProperty(mMixerUnit, kAudioUnitProperty_BusCount, kAudioUnitScope_Input, 0, &NumChannelsIn, sizeof(NumChannelsIn)));
CA_ERR(AudioUnitSetProperty(mMixerUnit, kAudioUnitProperty_BusCount, kAudioUnitScope_Output, 0, &NumChannelsOut, sizeof(NumChannelsOut)));
//Set render callback: the graph pulls audio by calling AudioRenderCallback
//with 'this' as the user-data pointer, feeding the converter's input bus 0.
AURenderCallbackStruct callback = { AudioRenderCallback, this };
CA_ERR(AUGraphSetNodeInputCallback(mAudioGraph, mConverterNode, 0, &callback));
//Query the mixer's default input format to obtain a canonical CoreAudio
//format, then override only the channel count for our output configuration.
AudioStreamBasicDescription OutputDesc = {0};
UInt32 Size = sizeof(AudioStreamBasicDescription);
CA_ERR(AudioUnitGetProperty(mMixerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &OutputDesc, &Size));
//Set num output channels
OutputDesc.mChannelsPerFrame = (int)AudioSystem::GetOutputChannelConfig();
//Set stream formats: converter input takes the file format; everything
//downstream of the converter runs in the canonical format.
CA_ERR(AudioUnitSetProperty(mConverterUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &mStreamDesc, sizeof(AudioStreamBasicDescription)));
CA_ERR(AudioUnitSetProperty(mConverterUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &OutputDesc, sizeof(AudioStreamBasicDescription)));
CA_ERR(AudioUnitSetProperty(mMixerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &OutputDesc, sizeof(AudioStreamBasicDescription)));
CA_ERR(AudioUnitSetProperty(mMixerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &OutputDesc, sizeof(AudioStreamBasicDescription)));
CA_ERR(AudioUnitSetProperty(mOutputUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &OutputDesc, sizeof(AudioStreamBasicDescription)));
//Initialise graph (allocates render resources; formats must be set first)
CA_ERR(AUGraphInitialize(mAudioGraph));
//Set notification callback (fires pre- and post-render each cycle)
CA_ERR(AUGraphAddRenderNotify(mAudioGraph, AudioNotifyCallback, this));
//Set global mixer volume (element 0xFFFFFFFF = the matrix master volume)
CA_ERR(AudioUnitSetParameter(mMixerUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Global, 0xFFFFFFFF, 1.0, 0));
//Set input channel volumes (matrix mixer gains default to 0 until set)
for(int i = 0; i < Header->nChannels; i++)
{
    CA_ERR(AudioUnitSetParameter(mMixerUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Input, i, 1.0, 0));
}
//Set output channel volumes
for(int i = 0; i < (int)AudioSystem::GetOutputChannelConfig(); i++)
{
    CA_ERR(AudioUnitSetParameter(mMixerUnit, kMatrixMixerParam_Volume, kAudioUnitScope_Output, i, 1.0, 0));
}
圖的啓動和停止分別使用AUGraphStart()和AUGraphStop(),沒有任何特別之處。下面是用Shark Profiler捕獲的調用棧:
| | | | | | | | | + 1.3%, AudioUnitGraph::Stop(), AudioToolbox
| | | | | | | | | | + 1.3%, AudioOutputUnitStop, AudioUnit
| | | | | | | | | | | + 1.3%, CallComponentDispatch, CarbonCore
| | | | | | | | | | | | + 1.3%, DefaultOutputAUEntry, CoreAudio
| | | | | | | | | | | | | + 1.3%, AUHALEntry, CoreAudio
| | | | | | | | | | | | | | + 1.3%, usleep$UNIX2003, libSystem.B.dylib
| | | | | | | | | | | | | | | + 1.3%, nanosleep$UNIX2003, libSystem.B.dylib
| | | | | | | | | | | | | | | | 1.3%, __semwait_signal, libSystem.B.dylib
整個AUGraphStop()的時間都花在睡眠上!
任何線索?
恐怕多線程的解決方案可能纔是這裏的答案!只是爲了停止一個圖就這樣做看起來非常過分,但我一定會試一試。 – 2011-03-24 20:03:03
哦,這是在OS X上,而不是在iPhone上,如果這對任何人都有影響 – 2011-03-24 20:03:41