
There are three Audio Units:
equalizerUnit (kAudioUnitSubType_NBandEQ),
3DmixerUnit (kAudioUnitSubType_SpatialMixer),
remoteIOUnit (kAudioUnitSubType_RemoteIO).
Using an AUGraph and nodes (equalizerNode, 3DmixerNode, remoteNode), they are connected correctly:
equalizerUnit -> 3DmixerUnit -> remoteIOUnit.

One problem: to connect the equalizerUnit to the 3DmixerUnit, I use a converter unit (kAudioUnitSubType_AUConverter), on whose output I set this AudioStreamBasicDescription:

    .mSampleRate = 44100.00,
    .mFormatID = kAudioFormatLinearPCM,
    .mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved,
    .mFramesPerPacket = 1,
    .mChannelsPerFrame = 1,
    .mBytesPerFrame = 2,
    .mBitsPerChannel = 16,
    .mBytesPerPacket = 2

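For reference, I apply that format to the converter's output scope like this (converterUnit and monoFormat are simply my names for the AUConverter instance and the description above):

    AudioUnitSetProperty(converterUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &monoFormat, sizeof(monoFormat));
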
As a result, I get mono sound on the output scope of the 3DmixerUnit. How do I get proper stereo out of the 3DmixerUnit?

I would appreciate any help!

P.S. Some additional info:
The main problem is that I need to feed a stereo signal into the two mono inputs of the 3DmixerUnit.
Apple's 3D Mixer Audio Unit guide states:
"To use a stereo source, you may treat its left and right channels as two independent single-channel sources, and then feed each side of the stereo stream to its own input bus."
https://developer.apple.com/library/ios/qa/qa1695/_index.html
I cannot figure out how to split the stereo output of my equalizerUnit into two independent single-channel sources. How does one do this?

1 Answer

Perhaps this will save someone time in the future when they hit the same problem.

// Stereo, interleaved, 16-bit: the general graph format.
AudioStreamBasicDescription canonicalAudioStreamBasicDescription = {
    .mSampleRate = 44100.00,
    .mFormatID = kAudioFormatLinearPCM,
    .mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked,
    .mFramesPerPacket = 1,
    .mChannelsPerFrame = 2,
    .mBytesPerFrame = 4,
    .mBitsPerChannel = 16,
    .mBytesPerPacket = 4
};
// Mono, 16-bit: the format each 3D mixer input bus expects.
AudioStreamBasicDescription canonicalAudioStreamBasicDescription3Dmixer = {
    .mSampleRate = 44100.00,
    .mFormatID = kAudioFormatLinearPCM,
    .mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked,
    .mFramesPerPacket = 1,
    .mChannelsPerFrame = 1,
    .mBytesPerFrame = 2,
    .mBitsPerChannel = 16,
    .mBytesPerPacket = 2
};
// Stereo, non-interleaved, 16-bit: the intermediate format used for splitting.
AudioStreamBasicDescription canonicalAudioStreamBasicDescriptionNonInterleaved = {
    .mSampleRate = 44100.00,
    .mFormatID = kAudioFormatLinearPCM,
    .mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked | kAudioFormatFlagIsNonInterleaved,
    .mFramesPerPacket = 1,
    .mChannelsPerFrame = 2,
    .mBytesPerFrame = 2,
    .mBitsPerChannel = 16,
    .mBytesPerPacket = 2
};

AudioComponentDescription convertUnitDescription = {
    .componentType = kAudioUnitType_FormatConverter,
    .componentSubType = kAudioUnitSubType_AUConverter,
    .componentFlags = 0,
    .componentFlagsMask = 0,
    .componentManufacturer = kAudioUnitManufacturer_Apple
};
AudioComponentDescription splitterUnitDescription = {
    .componentType = kAudioUnitType_FormatConverter,
    .componentSubType = kAudioUnitSubType_Splitter,
    .componentFlags = 0,
    .componentFlagsMask = 0,
    .componentManufacturer = kAudioUnitManufacturer_Apple
};
AudioComponentDescription mixerDescription = {
    .componentType = kAudioUnitType_Mixer,
    .componentSubType = kAudioUnitSubType_SpatialMixer,
    .componentFlags = 0,
    .componentFlagsMask = 0,
    .componentManufacturer = kAudioUnitManufacturer_Apple
};

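// Assumptions not shown in the original answer: audioGraph has already been
// created and opened (see the lifecycle sketch at the end), and
// maxFramesPerSlice was declared earlier, e.g.:
UInt32 maxFramesPerSlice = 4096;
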
AUGraphAddNode(audioGraph, &mixerDescription, &mixerNode);
AUGraphNodeInfo(audioGraph, mixerNode, &mixerDescription, &mixerUnit);
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));
UInt32 busCount = 2;
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_ElementCount, kAudioUnitScope_Input, 0, &busCount, sizeof(busCount));
Float64 graphSampleRate = 44100.0;
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_SampleRate, kAudioUnitScope_Output, 0, &graphSampleRate, sizeof(graphSampleRate));
// Pan bus 0 (left channel) hard left and bus 1 (right channel) hard right
// to reconstruct the stereo image.
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Distance, kAudioUnitScope_Input, 0, 1.0, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Azimuth, kAudioUnitScope_Input, 0, -90, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Distance, kAudioUnitScope_Input, 1, 1.0, 0);
AudioUnitSetParameter(mixerUnit, kSpatialMixerParam_Azimuth, kAudioUnitScope_Input, 1, 90, 0);
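// Optional, not part of the original answer: explicitly choose a
// spatialization algorithm per input bus (equal-power panning is the cheapest).
UInt32 spatializationAlgorithm = kSpatializationAlgorithm_EqualPowerPanning;
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_SpatializationAlgorithm, kAudioUnitScope_Input, 0, &spatializationAlgorithm, sizeof(spatializationAlgorithm));
AudioUnitSetProperty(mixerUnit, kAudioUnitProperty_SpatializationAlgorithm, kAudioUnitScope_Input, 1, &spatializationAlgorithm, sizeof(spatializationAlgorithm));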

// The splitter duplicates its input stream onto two output buses.
AUNode splitterNode;
AudioUnit splitterUnit;
AUGraphAddNode(audioGraph, &splitterUnitDescription, &splitterNode);
AUGraphNodeInfo(audioGraph, splitterNode, &splitterUnitDescription, &splitterUnit);

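// Assumption: srcFormatFromEqualizer is the equalizer's output format. If it
// is not already at hand, it can be read back like this:
AudioStreamBasicDescription srcFormatFromEqualizer;
UInt32 srcFormatSize = sizeof(srcFormatFromEqualizer);
AudioUnitGetProperty(equalizerUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &srcFormatFromEqualizer, &srcFormatSize);
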
// Left path: convert the interleaved equalizer output to non-interleaved stereo.
AUNode convertNodeFromInterleavedToNonInterleavedLeft;
AudioUnit convertUnitFromInterleavedToNonInterleavedLeft;
AUGraphAddNode(audioGraph, &convertUnitDescription, &convertNodeFromInterleavedToNonInterleavedLeft);
AUGraphNodeInfo(audioGraph, convertNodeFromInterleavedToNonInterleavedLeft, &convertUnitDescription, &convertUnitFromInterleavedToNonInterleavedLeft);
AudioUnitSetProperty(convertUnitFromInterleavedToNonInterleavedLeft, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormatFromEqualizer, sizeof(srcFormatFromEqualizer));
AudioUnitSetProperty(convertUnitFromInterleavedToNonInterleavedLeft, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(convertUnitFromInterleavedToNonInterleavedLeft, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));

// Right path: the same conversion for the second splitter output.
AUNode convertNodeFromInterleavedToNonInterleavedRight;
AudioUnit convertUnitFromInterleavedToNonInterleavedRight;
AUGraphAddNode(audioGraph, &convertUnitDescription, &convertNodeFromInterleavedToNonInterleavedRight);
AUGraphNodeInfo(audioGraph, convertNodeFromInterleavedToNonInterleavedRight, &convertUnitDescription, &convertUnitFromInterleavedToNonInterleavedRight);
AudioUnitSetProperty(convertUnitFromInterleavedToNonInterleavedRight, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &srcFormatFromEqualizer, sizeof(srcFormatFromEqualizer));
AudioUnitSetProperty(convertUnitFromInterleavedToNonInterleavedRight, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(convertUnitFromInterleavedToNonInterleavedRight, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));

// Extract the left channel: a channel map of {0} renders only input channel 0.
AUNode converterNodeFromNonInterleavedToMonoLeftChannel;
AudioUnit converterUnitFromNonInterleavedToMonoLeftChannel;
SInt32 left[1] = {0};
UInt32 leftSize = (UInt32)sizeof(left);
AUGraphAddNode(audioGraph, &convertUnitDescription, &converterNodeFromNonInterleavedToMonoLeftChannel);
AUGraphNodeInfo(audioGraph, converterNodeFromNonInterleavedToMonoLeftChannel, &convertUnitDescription, &converterUnitFromNonInterleavedToMonoLeftChannel);
AudioUnitSetProperty(converterUnitFromNonInterleavedToMonoLeftChannel, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, left, leftSize);
AudioUnitSetProperty(converterUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(converterUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescription3Dmixer, sizeof(canonicalAudioStreamBasicDescription3Dmixer));
AudioUnitSetProperty(converterUnitFromNonInterleavedToMonoLeftChannel, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));

// Extract the right channel: a channel map of {1} renders only input channel 1.
AUNode converterNodeFromNonInterleavedToMonoRightChannel;
AudioUnit converterUnitFromNonInterleavedToMonoRightChannel;
SInt32 right[1] = {1};
UInt32 rightSize = (UInt32)sizeof(right);
AUGraphAddNode(audioGraph, &convertUnitDescription, &converterNodeFromNonInterleavedToMonoRightChannel);
AUGraphNodeInfo(audioGraph, converterNodeFromNonInterleavedToMonoRightChannel, &convertUnitDescription, &converterUnitFromNonInterleavedToMonoRightChannel);
AudioUnitSetProperty(converterUnitFromNonInterleavedToMonoRightChannel, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 0, right, rightSize);
AudioUnitSetProperty(converterUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 0, &canonicalAudioStreamBasicDescriptionNonInterleaved, sizeof(canonicalAudioStreamBasicDescriptionNonInterleaved));
AudioUnitSetProperty(converterUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 0, &canonicalAudioStreamBasicDescription3Dmixer, sizeof(canonicalAudioStreamBasicDescription3Dmixer));
AudioUnitSetProperty(converterUnitFromNonInterleavedToMonoRightChannel, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maxFramesPerSlice, sizeof(maxFramesPerSlice));

// Wire the graph: equalizer -> splitter -> (convert + extract) x2 -> mixer buses 0/1.
AUGraphConnectNodeInput(audioGraph, equalizerNode, 0, splitterNode, 0);
AUGraphConnectNodeInput(audioGraph, splitterNode, 0, convertNodeFromInterleavedToNonInterleavedLeft, 0);
AUGraphConnectNodeInput(audioGraph, splitterNode, 1, convertNodeFromInterleavedToNonInterleavedRight, 0);
AUGraphConnectNodeInput(audioGraph, convertNodeFromInterleavedToNonInterleavedLeft, 0, converterNodeFromNonInterleavedToMonoLeftChannel, 0);
AUGraphConnectNodeInput(audioGraph, convertNodeFromInterleavedToNonInterleavedRight, 0, converterNodeFromNonInterleavedToMonoRightChannel, 0);
AUGraphConnectNodeInput(audioGraph, converterNodeFromNonInterleavedToMonoLeftChannel, 0, mixerNode, 0);
AUGraphConnectNodeInput(audioGraph, converterNodeFromNonInterleavedToMonoRightChannel, 0, mixerNode, 1);
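// And, as in the question's original graph, the mixer feeds the RemoteIO node:
AUGraphConnectNodeInput(audioGraph, mixerNode, 0, remoteNode, 0);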

That's it: the key part of the working code in full.
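
For completeness, a minimal sketch of the surrounding AUGraph lifecycle (the setup above sits between AUGraphOpen and AUGraphInitialize). Every call above returns an OSStatus that the snippets ignore for brevity; checking it is strongly recommended:

AUGraph audioGraph;
NewAUGraph(&audioGraph);
// ... AUGraphAddNode calls ...
AUGraphOpen(audioGraph); // AUGraphNodeInfo only returns units once the graph is open
// ... AUGraphNodeInfo, AudioUnitSetProperty, AUGraphConnectNodeInput calls ...
OSStatus status = AUGraphInitialize(audioGraph);
if (status != noErr) { /* handle the error rather than ignoring it */ }
AUGraphStart(audioGraph);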