I'm not entirely sure this can be achieved with AVFoundation alone; you may need to drop down to the Audio Unit framework and create a stream. It should be relatively simple to send the contents of the .wav file to the audio buffer.
This is how I've been doing it in Piti Piti Pa. Another benefit is that you get better control over the audio delay, which helps synchronize audio with video animations (most noticeable when using Bluetooth).
Here's the code I'm using to initialize the audio unit:
+(BOOL)_createAudioUnitInstance
{
    // Describe the RemoteIO output audio unit
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Find the matching component and instantiate it
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
    OSStatus status = AudioComponentInstanceNew(inputComponent, &_audioUnit);
    [self _logStatus:status step:@"instantiate"];
    return (status == noErr);
}
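Note that these class methods reference a few file-scope variables that aren't shown in the snippets. A rough sketch of the declarations they assume (the names come from the code above, but the types and initial values are my guesses):

#import <AudioToolbox/AudioToolbox.h>

static AudioUnit _audioUnit;                       // the RemoteIO instance created above
static const AudioUnitElement _outputAudioBus = 0; // element 0 is RemoteIO's output bus
static UInt32 _nextFrame = 0;                      // next frame to read from the PCM data
static UInt32 _lastPushedFrame = 0;                // last frame handed to the render callback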
+(BOOL)_setupAudioUnitOutput
{
    // Enable playback on the unit's output bus
    UInt32 flag = 1;
    OSStatus status = AudioUnitSetProperty(_audioUnit,
                                           kAudioOutputUnitProperty_EnableIO,
                                           kAudioUnitScope_Output,
                                           _outputAudioBus,
                                           &flag,
                                           sizeof(flag));
    [self _logStatus:status step:@"set output bus"];
    return (status == noErr);
}
+(BOOL)_setupAudioUnitFormat
{
    // Interleaved 16-bit signed integer stereo PCM at 44.1 kHz
    AudioStreamBasicDescription audioFormat = {0};
    audioFormat.mSampleRate       = 44100.0;
    audioFormat.mFormatID         = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket  = 1;
    audioFormat.mChannelsPerFrame = 2;
    audioFormat.mBitsPerChannel   = 16;
    audioFormat.mBytesPerPacket   = 4; // 2 channels * 2 bytes per sample
    audioFormat.mBytesPerFrame    = 4;

    // The format is set on the input scope of the output bus:
    // it describes the data we feed *into* the RemoteIO unit
    OSStatus status = AudioUnitSetProperty(_audioUnit,
                                           kAudioUnitProperty_StreamFormat,
                                           kAudioUnitScope_Input,
                                           _outputAudioBus,
                                           &audioFormat,
                                           sizeof(audioFormat));
    [self _logStatus:status step:@"set audio format"];
    return (status == noErr);
}
+(BOOL)_setupAudioUnitRenderCallback
{
    // The audio unit pulls samples by invoking playbackCallback (below)
    AURenderCallbackStruct audioCallback;
    audioCallback.inputProc = playbackCallback;
    audioCallback.inputProcRefCon = (__bridge void *)(self);
    OSStatus status = AudioUnitSetProperty(_audioUnit,
                                           kAudioUnitProperty_SetRenderCallback,
                                           kAudioUnitScope_Global,
                                           _outputAudioBus,
                                           &audioCallback,
                                           sizeof(audioCallback));
    [self _logStatus:status step:@"set render callback"];
    return (status == noErr);
}
+(BOOL)_initializeAudioUnit
{
    OSStatus status = AudioUnitInitialize(_audioUnit);
    [self _logStatus:status step:@"initialize"];
    return (status == noErr);
}
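The steps above need to run in this order: create the instance, enable output, set the format, install the callback, then initialize. A small wrapper along these lines ties them together (the wrapper name is my own, not from the app):

+(BOOL)_setupAudio
{
    return [self _createAudioUnitInstance]
        && [self _setupAudioUnitOutput]
        && [self _setupAudioUnitFormat]
        && [self _setupAudioUnitRenderCallback]
        && [self _initializeAudioUnit];
}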
+(void)start
{
    [self clearFeeds];
    [self _startAudioUnit];
}

+(void)stop
{
    [self _stopAudioUnit];
}

+(BOOL)_startAudioUnit
{
    OSStatus status = AudioOutputUnitStart(_audioUnit);
    [self _logStatus:status step:@"start"];
    return (status == noErr);
}

+(BOOL)_stopAudioUnit
{
    OSStatus status = AudioOutputUnitStop(_audioUnit);
    [self _logStatus:status step:@"stop"];
    return (status == noErr);
}
+(void)_logStatus:(OSStatus)status step:(NSString *)step
{
    if (status != noErr) {
        NSLog(@"AudioUnit failed to %@, error: %d", step, (int)status);
    }
}
#pragma mark - Mixer
static OSStatus playbackCallback(void *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp *inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList *ioData)
{
    @autoreleasepool {
        // Fill the output buffer with the next inNumberFrames frames of PCM data
        AudioBuffer *audioBuffer = ioData->mBuffers;
        _lastPushedFrame = _nextFrame;
        [SIOAudioMixer _generateAudioFrames:inNumberFrames into:audioBuffer->mData];
    }
    return noErr;
}
Now you just need to extract the contents of the .wav files (easier if you export them to raw PCM format first) and send the samples to the buffer via the callback.
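For illustration, here's a minimal sketch of what _generateAudioFrames:into: could look like once the raw PCM data (interleaved 16-bit stereo, matching the format above) has been loaded into memory. The _pcmData variable and the silence-past-the-end behavior are my assumptions, not the actual mixer from the app:

// Hypothetical storage for the decoded file; in practice you'd load it once,
// e.g. with [NSData dataWithContentsOfFile:...]
static NSData *_pcmData;

+(void)_generateAudioFrames:(UInt32)frameCount into:(void *)buffer
{
    const UInt32 bytesPerFrame = 4; // 2 channels * 2 bytes, matches the ASBD above
    UInt32 totalFrames = (UInt32)([_pcmData length] / bytesPerFrame);
    SInt16 *out = (SInt16 *)buffer;
    const SInt16 *src = (const SInt16 *)[_pcmData bytes];

    for (UInt32 i = 0; i < frameCount; i++) {
        if (_nextFrame < totalFrames) {
            out[2 * i]     = src[2 * _nextFrame];     // left sample
            out[2 * i + 1] = src[2 * _nextFrame + 1]; // right sample
            _nextFrame++;
        } else {
            out[2 * i] = out[2 * i + 1] = 0;          // past the end: output silence
        }
    }
}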
I hope that helps!