From 1145bca0422a6d4c375239b94dd2b4b51e3d193d Mon Sep 17 00:00:00 2001
From: Christopher Snowhill
Date: Sun, 3 Jul 2022 22:39:43 -0700
Subject: [PATCH] [Equalizer] Fix support for arbitrary channels

The deinterleaved format was being specified incorrectly. Now it asks for
the correct format, which is deinterleaved, and the bytes per frame or
packet sizes are relative to a single channel's buffer, not all buffers.
Oops, that could have been more clear in the documentation.

Signed-off-by: Christopher Snowhill
---
 Audio/Output/OutputAVFoundation.m | 21 ++++++++++++++++++---
 1 file changed, 18 insertions(+), 3 deletions(-)

diff --git a/Audio/Output/OutputAVFoundation.m b/Audio/Output/OutputAVFoundation.m
index 09b9e4f7f..897d12cf0 100644
--- a/Audio/Output/OutputAVFoundation.m
+++ b/Audio/Output/OutputAVFoundation.m
@@ -648,9 +648,18 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 		return;
 	}
 
+	if(eqInitialized) {
+		AudioUnitUninitialize(_eq);
+		eqInitialized = NO;
+	}
+
 	AudioStreamBasicDescription asbd = streamFormat;
 
-	asbd.mFormatFlags &= ~kAudioFormatFlagIsPacked;
+	// Of course, non-interleaved has only one sample per frame/packet, per buffer
+	asbd.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
+	asbd.mBytesPerFrame = sizeof(float);
+	asbd.mBytesPerPacket = sizeof(float);
+	asbd.mFramesPerPacket = 1;
 
 	UInt32 maximumFrames = 1024;
 	AudioUnitSetProperty(_eq, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maximumFrames, sizeof(maximumFrames));
@@ -665,6 +674,12 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 
 	AudioUnitReset(_eq, kAudioUnitScope_Global, 0);
 
+	if(AudioUnitInitialize(_eq) != noErr) {
+		eqEnabled = NO;
+		return;
+	}
+	eqInitialized = YES;
+
 	eqEnabled = [[[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"GraphicEQenable"] boolValue];
 }
 
@@ -733,7 +748,7 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 	}
 
 	if(samplesRendered) {
-		if(eqEnabled) {
+		if(eqEnabled && eqInitialized) {
 			const int channels = streamFormat.mChannelsPerFrame;
 			if(channels > 0) {
 				const size_t channelsminusone = channels - 1;
@@ -743,7 +758,7 @@
 				ioData->mNumberBuffers = channels;
 				for(size_t i = 0; i < channels; ++i) {
 					ioData->mBuffers[i].mData = &eqBuffer[1024 * i];
-					ioData->mBuffers[i].mDataByteSize = 1024 * sizeof(float);
+					ioData->mBuffers[i].mDataByteSize = samplesRendered * sizeof(float);
 					ioData->mBuffers[i].mNumberChannels = 1;
 				}
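
For reference, the Core Audio convention the patch relies on can be shown in a
small standalone sketch. The MakeDeinterleavedFloatASBD helper below is
hypothetical and not part of the Cog source; only the flag and field values
mirror what the patch configures for the EQ unit.

    #include <CoreAudio/CoreAudioTypes.h>

    // Illustrative helper (assumption, not Cog code): builds a deinterleaved
    // 32-bit float AudioStreamBasicDescription. With
    // kAudioFormatFlagIsNonInterleaved set, each channel occupies its own
    // buffer in the AudioBufferList, so mBytesPerFrame and mBytesPerPacket
    // describe a single channel's samples (sizeof(float)), not
    // channels * sizeof(float). The channel count still goes in
    // mChannelsPerFrame.
    static AudioStreamBasicDescription
    MakeDeinterleavedFloatASBD(Float64 sampleRate, UInt32 channels) {
        AudioStreamBasicDescription asbd = {0};
        asbd.mSampleRate = sampleRate;
        asbd.mFormatID = kAudioFormatLinearPCM;
        asbd.mFormatFlags = kAudioFormatFlagIsFloat |
                            kAudioFormatFlagsNativeEndian |
                            kAudioFormatFlagIsPacked |
                            kAudioFormatFlagIsNonInterleaved;
        asbd.mFramesPerPacket = 1;            // linear PCM: one frame per packet
        asbd.mChannelsPerFrame = channels;
        asbd.mBitsPerChannel = 8 * sizeof(float);
        asbd.mBytesPerFrame = sizeof(float);  // per channel, because non-interleaved
        asbd.mBytesPerPacket = sizeof(float);
        return asbd;
    }

The render-callback half of the patch follows the same rule: each per-channel
buffer in the AudioBufferList is marked mNumberChannels = 1 and sized as the
rendered frame count times sizeof(float), rather than a fixed 1024 frames.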