DSP: Move Equalizer processor to DSP node chain
The last of the built-in processors is now in the threaded processing chain, and all DSPs are marked high priority and with short buffers.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
parent dc0a44067a
commit 26efcda71a

10 changed files with 500 additions and 224 deletions
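For orientation, the buffer chain after this change runs Input → Converter → Rubberband → FreeSurround → Equalizer → HRTF, with the new equalizer node spliced in between the FreeSurround and HRTF nodes. A condensed sketch of the resulting wiring (distilled from the BufferChain.m hunk below, not a literal excerpt; each node pulls from its previous: node, and the 0.03-second latency argument is what gives each DSP the short buffer the commit message mentions):

    // Wiring after this commit, condensed from BufferChain.m below.
    converterNode = [[ConverterNode alloc] initWithController:self previous:inputNode];
    rubberbandNode = [[DSPRubberbandNode alloc] initWithController:self previous:converterNode latency:0.03];
    fsurroundNode = [[DSPFSurroundNode alloc] initWithController:self previous:rubberbandNode latency:0.03];
    equalizerNode = [[DSPEqualizerNode alloc] initWithController:self previous:fsurroundNode latency:0.03]; // new in this commit
    hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:equalizerNode latency:0.03];
    finalNode = hrtfNode;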
BufferChain.h

@@ -13,6 +13,7 @@
 #import "DSPRubberbandNode.h"
 #import "DSPFSurroundNode.h"
 #import "DSPHRTFNode.h"
+#import "DSPEqualizerNode.h"
 #import "InputNode.h"
 
 @interface BufferChain : NSObject {
@@ -21,6 +22,7 @@
     DSPRubberbandNode *rubberbandNode;
     DSPFSurroundNode *fsurroundNode;
     DSPHRTFNode *hrtfNode;
+    DSPEqualizerNode *equalizerNode;
 
     NSURL *streamURL;
     id userInfo;
@@ -83,6 +85,8 @@
 
 - (DSPHRTFNode *)hrtf;
 
+- (DSPEqualizerNode *)equalizer;
+
 - (double)secondsBuffered;
 
 - (void)sustainHDCD;
BufferChain.m

@@ -28,6 +28,7 @@
 
     rubberbandNode = nil;
     fsurroundNode = nil;
+    equalizerNode = nil;
     hrtfNode = nil;
 }
 
@@ -42,7 +43,8 @@
     converterNode = [[ConverterNode alloc] initWithController:self previous:inputNode];
     rubberbandNode = [[DSPRubberbandNode alloc] initWithController:self previous:converterNode latency:0.03];
     fsurroundNode = [[DSPFSurroundNode alloc] initWithController:self previous:rubberbandNode latency:0.03];
-    hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:fsurroundNode latency:0.03];
+    equalizerNode = [[DSPEqualizerNode alloc] initWithController:self previous:fsurroundNode latency:0.03];
+    hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:equalizerNode latency:0.03];
 
     finalNode = hrtfNode;
 }
@@ -155,6 +157,7 @@
     [converterNode launchThread];
     [rubberbandNode launchThread];
     [fsurroundNode launchThread];
+    [equalizerNode launchThread];
     [hrtfNode launchThread];
 }
 
@@ -225,6 +228,7 @@
     [converterNode setShouldContinue:s];
     [rubberbandNode setShouldContinue:s];
     [fsurroundNode setShouldContinue:s];
+    [equalizerNode setShouldContinue:s];
     [hrtfNode setShouldContinue:s];
 }
 
@@ -256,6 +260,10 @@
     return hrtfNode;
 }
 
+- (DSPEqualizerNode *)equalizer {
+    return equalizerNode;
+}
+
 - (AudioStreamBasicDescription)inputFormat {
     return [inputNode nodeFormat];
 }
Audio/Chain/DSP/DSPEqualizerNode.h (new file, 29 additions)

@@ -0,0 +1,29 @@
//
//  DSPEqualizerNode.h
//  CogAudio
//
//  Created by Christopher Snowhill on 2/11/25.
//

#ifndef DSPEqualizerNode_h
#define DSPEqualizerNode_h

#import "DSPNode.h"

@interface DSPEqualizerNode : DSPNode {
    float *samplePtr;
}

- (id _Nullable)initWithController:(id _Nonnull)c previous:(id _Nullable)p latency:(double)latency;

- (BOOL)setup;
- (void)cleanUp;

- (void)resetBuffer;

- (void)process;
- (AudioChunk * _Nullable)convert;

@end

#endif /* DSPEqualizerNode_h */
Audio/Chain/DSP/DSPEqualizerNode.m (new file, 447 additions)

@@ -0,0 +1,447 @@
//
//  DSPEqualizerNode.m
//  CogAudio Framework
//
//  Created by Christopher Snowhill on 2/11/25.
//

#import <Foundation/Foundation.h>

#import <AudioToolbox/AudioToolbox.h>
#import <AudioUnit/AudioUnit.h>

#import <Accelerate/Accelerate.h>

#import "DSPEqualizerNode.h"

#import "BufferChain.h"

#import "AudioPlayer.h"

extern void scale_by_volume(float *buffer, size_t count, float volume);

static void * kDSPEqualizerNodeContext = &kDSPEqualizerNodeContext;

@interface EQHookContainer : NSObject {
    NSMutableArray *equalizers;
}

+ (EQHookContainer *)sharedContainer;

- (id)init;

- (void)pushEqualizer:(AudioUnit)eq forPlayer:(AudioPlayer *)audioPlayer;
- (void)popEqualizer:(AudioUnit)eq forPlayer:(AudioPlayer *)audioPlayer;

@end

@implementation EQHookContainer

static EQHookContainer *theContainer = nil;

+ (EQHookContainer *)sharedContainer {
    @synchronized(theContainer) {
        if(!theContainer) {
            theContainer = [[EQHookContainer alloc] init];
        }
        return theContainer;
    }
}

- (id)init {
    self = [super init];
    if(self) {
        equalizers = [[NSMutableArray alloc] init];
    }
    return self;
}

- (void)pushEqualizer:(AudioUnit)eq forPlayer:(AudioPlayer *)audioPlayer {
    @synchronized (equalizers) {
        [equalizers addObject:@((uintptr_t)eq)];
        if([equalizers count] == 1) {
            [audioPlayer beginEqualizer:eq];
        }
    }
}

- (void)popEqualizer:(AudioUnit)eq forPlayer:(AudioPlayer *)audioPlayer {
    @synchronized (equalizers) {
        uintptr_t _eq = [[equalizers objectAtIndex:0] unsignedIntegerValue];
        if(eq == (AudioUnit)_eq) {
            [equalizers removeObject:@(_eq)];
            if([equalizers count]) {
                _eq = [[equalizers objectAtIndex:0] unsignedIntegerValue];
                [audioPlayer beginEqualizer:(AudioUnit)_eq];
            }
        }
    }
}

@end

@implementation DSPEqualizerNode {
    BOOL enableEqualizer;
    BOOL equalizerInitialized;

    double equalizerPreamp;

    AudioUnit _eq;

    AudioTimeStamp timeStamp;

    BOOL stopping, paused;
    BOOL processEntered;

    BOOL observersapplied;

    AudioStreamBasicDescription lastInputFormat;
    AudioStreamBasicDescription inputFormat;

    uint32_t lastInputChannelConfig, inputChannelConfig;
    uint32_t outputChannelConfig;

    float inBuffer[4096 * 32];
    float eqBuffer[4096 * 32];
    float outBuffer[4096 * 32];
}

static void fillBuffers(AudioBufferList *ioData, const float *inbuffer, size_t count, size_t offset) {
    const size_t channels = ioData->mNumberBuffers;
    for(int i = 0; i < channels; ++i) {
        const size_t maxCount = (ioData->mBuffers[i].mDataByteSize / sizeof(float)) - offset;
        float *output = ((float *)ioData->mBuffers[i].mData) + offset;
        const float *input = inbuffer + i;
        cblas_scopy((int)((count > maxCount) ? maxCount : count), input, (int)channels, output, 1);
        ioData->mBuffers[i].mNumberChannels = 1;
    }
}

static void clearBuffers(AudioBufferList *ioData, size_t count, size_t offset) {
    for(int i = 0; i < ioData->mNumberBuffers; ++i) {
        memset((uint8_t *)ioData->mBuffers[i].mData + offset * sizeof(float), 0, count * sizeof(float));
        ioData->mBuffers[i].mNumberChannels = 1;
    }
}

static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) {
    if(inNumberFrames > 4096 || !inRefCon) {
        clearBuffers(ioData, inNumberFrames, 0);
        return 0;
    }

    DSPEqualizerNode *_self = (__bridge DSPEqualizerNode *)inRefCon;

    fillBuffers(ioData, _self->samplePtr, inNumberFrames, 0);

    return 0;
}

- (id _Nullable)initWithController:(id _Nonnull)c previous:(id _Nullable)p latency:(double)latency {
    self = [super initWithController:c previous:p latency:latency];
    if(self) {
        NSUserDefaults *defaults = [[NSUserDefaultsController sharedUserDefaultsController] defaults];
        enableEqualizer = [defaults boolForKey:@"GraphicEQenable"];

        float preamp = [defaults floatForKey:@"eqPreamp"];
        equalizerPreamp = pow(10.0, preamp / 20.0);

        [self addObservers];
    }
    return self;
}

- (void)dealloc {
    [self cleanUp];
    [self removeObservers];
}

- (void)addObservers {
    [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.GraphicEQenable" options:0 context:kDSPEqualizerNodeContext];
    [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.eqPreamp" options:0 context:kDSPEqualizerNodeContext];

    observersapplied = YES;
}

- (void)removeObservers {
    if(observersapplied) {
        [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.GraphicEQenable" context:kDSPEqualizerNodeContext];
        [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.eqPreamp" context:kDSPEqualizerNodeContext];
        observersapplied = NO;
    }
}

- (void)observeValueForKeyPath:(NSString *)keyPath ofObject:(id)object change:(NSDictionary *)change context:(void *)context {
    if(context != kDSPEqualizerNodeContext) {
        [super observeValueForKeyPath:keyPath ofObject:object change:change context:context];
        return;
    }

    if([keyPath isEqualToString:@"values.GraphicEQenable"]) {
        NSUserDefaults *defaults = [[NSUserDefaultsController sharedUserDefaultsController] defaults];
        enableEqualizer = [defaults boolForKey:@"GraphicEQenable"];
    } else if([keyPath isEqualToString:@"values.eqPreamp"]) {
        NSUserDefaults *defaults = [[NSUserDefaultsController sharedUserDefaultsController] defaults];
        float preamp = [defaults floatForKey:@"eqPreamp"];
        equalizerPreamp = pow(10.0, preamp / 20.0);
    }
}

- (AudioPlayer *)audioPlayer {
    BufferChain *bufferChain = controller;
    return [bufferChain controller];
}

- (BOOL)fullInit {
    if(enableEqualizer) {
        AudioComponentDescription desc;
        NSError *err;

        desc.componentType = kAudioUnitType_Effect;
        desc.componentSubType = kAudioUnitSubType_GraphicEQ;
        desc.componentManufacturer = kAudioUnitManufacturer_Apple;
        desc.componentFlags = 0;
        desc.componentFlagsMask = 0;

        AudioComponent comp = NULL;

        comp = AudioComponentFindNext(comp, &desc);
        if(!comp) {
            return NO;
        }

        OSStatus _err = AudioComponentInstanceNew(comp, &_eq);
        if(err) {
            return NO;
        }

        UInt32 value;
        UInt32 size = sizeof(value);

        value = 4096;
        AudioUnitSetProperty(_eq, kAudioUnitProperty_MaximumFramesPerSlice,
                             kAudioUnitScope_Global, 0, &value, size);

        value = 127;
        AudioUnitSetProperty(_eq, kAudioUnitProperty_RenderQuality,
                             kAudioUnitScope_Global, 0, &value, size);

        AURenderCallbackStruct callbackStruct;
        callbackStruct.inputProcRefCon = (__bridge void *)self;
        callbackStruct.inputProc = eqRenderCallback;
        AudioUnitSetProperty(_eq, kAudioUnitProperty_SetRenderCallback,
                             kAudioUnitScope_Input, 0, &callbackStruct, sizeof(callbackStruct));

        AudioUnitReset(_eq, kAudioUnitScope_Input, 0);
        AudioUnitReset(_eq, kAudioUnitScope_Output, 0);

        AudioUnitReset(_eq, kAudioUnitScope_Global, 0);

        AudioStreamBasicDescription asbd = inputFormat;

        // Of course, non-interleaved has only one sample per frame/packet, per buffer
        asbd.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
        asbd.mBytesPerFrame = sizeof(float);
        asbd.mBytesPerPacket = sizeof(float);
        asbd.mFramesPerPacket = 1;

        UInt32 maximumFrames = 4096;
        AudioUnitSetProperty(_eq, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maximumFrames, sizeof(maximumFrames));

        AudioUnitSetProperty(_eq, kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Input, 0, &asbd, sizeof(asbd));

        AudioUnitSetProperty(_eq, kAudioUnitProperty_StreamFormat,
                             kAudioUnitScope_Output, 0, &asbd, sizeof(asbd));
        AudioUnitReset(_eq, kAudioUnitScope_Input, 0);
        AudioUnitReset(_eq, kAudioUnitScope_Output, 0);

        AudioUnitReset(_eq, kAudioUnitScope_Global, 0);

        _err = AudioUnitInitialize(_eq);
        if(_err != noErr) {
            return NO;
        }

        bzero(&timeStamp, sizeof(timeStamp));
        timeStamp.mFlags = kAudioTimeStampSampleTimeValid;

        equalizerInitialized = YES;

        [[EQHookContainer sharedContainer] pushEqualizer:_eq forPlayer:[self audioPlayer]];
    }

    return YES;
}

- (void)fullShutdown {
    if(_eq) {
        if(equalizerInitialized) {
            [[EQHookContainer sharedContainer] popEqualizer:_eq forPlayer:[self audioPlayer]];
            AudioUnitUninitialize(_eq);
            equalizerInitialized = NO;
        }
        AudioComponentInstanceDispose(_eq);
        _eq = NULL;
    }
}

- (BOOL)setup {
    if(stopping)
        return NO;
    [self fullShutdown];
    return [self fullInit];
}

- (void)cleanUp {
    stopping = YES;
    while(processEntered) {
        usleep(1000);
    }
    [self fullShutdown];
}

- (void)resetBuffer {
    paused = YES;
    while(processEntered) {
        usleep(500);
    }
    [super resetBuffer];
    [self fullShutdown];
    paused = NO;
}

- (void)process {
    while([self shouldContinue] == YES) {
        if(paused) {
            usleep(500);
            continue;
        }
        @autoreleasepool {
            AudioChunk *chunk = nil;
            chunk = [self convert];
            if(!chunk) {
                if([self endOfStream] == YES) {
                    break;
                }
                if(paused) {
                    continue;
                }
            } else {
                [self writeChunk:chunk];
                chunk = nil;
            }
            if(!enableEqualizer && equalizerInitialized) {
                [self fullShutdown];
            }
        }
    }
}

- (AudioChunk *)convert {
    if(stopping)
        return nil;

    processEntered = YES;

    if(stopping || [self endOfStream] == YES || [self shouldContinue] == NO) {
        processEntered = NO;
        return nil;
    }

    if(![self peekFormat:&inputFormat channelConfig:&inputChannelConfig]) {
        processEntered = NO;
        return nil;
    }

    if((enableEqualizer && !equalizerInitialized) ||
       memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 ||
       inputChannelConfig != lastInputChannelConfig) {
        lastInputFormat = inputFormat;
        lastInputChannelConfig = inputChannelConfig;
        [self fullShutdown];
        if(![self setup]) {
            processEntered = NO;
            return nil;
        }
    }

    if(!equalizerInitialized) {
        processEntered = NO;
        return [self readChunk:4096];
    }

    size_t totalFrameCount = 0;
    AudioChunk *chunk;

    samplePtr = &inBuffer[0];
    size_t channels = inputFormat.mChannelsPerFrame;

    while(!stopping && totalFrameCount < 4096) {
        AudioStreamBasicDescription newInputFormat;
        uint32_t newChannelConfig;
        if(![self peekFormat:&newInputFormat channelConfig:&newChannelConfig] ||
           memcmp(&newInputFormat, &inputFormat, sizeof(newInputFormat)) != 0 ||
           newChannelConfig != inputChannelConfig) {
            break;
        }

        chunk = [self readChunkAsFloat32:4096 - totalFrameCount];
        if(!chunk) {
            break;
        }

        size_t frameCount = [chunk frameCount];
        NSData *sampleData = [chunk removeSamples:frameCount];

        cblas_scopy((int)(frameCount * channels), [sampleData bytes], 1, &inBuffer[totalFrameCount * channels], 1);

        totalFrameCount += frameCount;
    }

    if(!totalFrameCount) {
        processEntered = NO;
        return nil;
    }

    const size_t channelsminusone = channels - 1;
    uint8_t tempBuffer[sizeof(AudioBufferList) + sizeof(AudioBuffer) * channelsminusone];
    AudioBufferList *ioData = (AudioBufferList *)&tempBuffer[0];

    ioData->mNumberBuffers = (UInt32)channels;
    for(size_t i = 0; i < channels; ++i) {
        ioData->mBuffers[i].mData = &eqBuffer[4096 * i];
        ioData->mBuffers[i].mDataByteSize = (UInt32)(totalFrameCount * sizeof(float));
        ioData->mBuffers[i].mNumberChannels = 1;
    }

    OSStatus status = AudioUnitRender(_eq, NULL, &timeStamp, 0, (UInt32)totalFrameCount, ioData);

    if(status != noErr) {
        processEntered = NO;
        return nil;
    }

    timeStamp.mSampleTime += ((double)totalFrameCount) / inputFormat.mSampleRate;

    for(int i = 0; i < channels; ++i) {
        cblas_scopy((int)totalFrameCount, &eqBuffer[4096 * i], 1, &outBuffer[i], (int)channels);
    }

    AudioChunk *outputChunk = nil;
    if(totalFrameCount) {
        scale_by_volume(&outBuffer[0], totalFrameCount * channels, equalizerPreamp);

        outputChunk = [[AudioChunk alloc] init];
        [outputChunk setFormat:inputFormat];
        if(outputChannelConfig) {
            [outputChunk setChannelConfig:inputChannelConfig];
        }
        [outputChunk assignSamples:&outBuffer[0] frameCount:totalFrameCount];
    }

    processEntered = NO;
    return outputChunk;
}

@end
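A note on the data flow in the new node, since it is indirect: convert accumulates up to 4096 interleaved float frames in inBuffer and points samplePtr at them, then calls AudioUnitRender on the GraphicEQ unit, which pulls those samples back through eqRenderCallback; fillBuffers deinterleaves them into one mono buffer per channel, and the unit's planar output in eqBuffer is reinterleaved into outBuffer with cblas_scopy and scaled by equalizerPreamp before being emitted as a chunk. The EQHookContainer singleton appears to exist so the UI only ever binds one equalizer at a time: pushEqualizer: reports a unit to the AudioPlayer via beginEqualizer: only when it is the first in the list, and popEqualizer: hands the role to the next unit in line when the front one goes away.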
InputNode.m

@@ -163,6 +163,7 @@ static void *kInputNodeContext = &kInputNodeContext;
     ConverterNode *converter = [bufferChain converter];
     DSPRubberbandNode *rubberband = [bufferChain rubberband];
     DSPFSurroundNode *fsurround = [bufferChain fsurround];
+    DSPEqualizerNode *equalizer = [bufferChain equalizer];
     DSPHRTFNode *hrtf = [bufferChain hrtf];
     DLog(@"SEEKING! Resetting Buffer");
 
@@ -172,6 +173,7 @@ static void *kInputNodeContext = &kInputNodeContext;
     [converter inputFormatDidChange:[bufferChain inputFormat] inputConfig:[bufferChain inputConfig]];
     [rubberband resetBuffer];
     [fsurround resetBuffer];
+    [equalizer resetBuffer];
     [hrtf resetBuffer];
 
     DLog(@"Reset buffer!");
OutputNode.h

@@ -28,10 +28,6 @@
     BOOL intervalReported;
 }
 
-- (void)beginEqualizer:(AudioUnit)eq;
-- (void)refreshEqualizer:(AudioUnit)eq;
-- (void)endEqualizer:(AudioUnit)eq;
-
 - (double)amountPlayed;
 - (double)amountPlayedInterval;
 
OutputNode.m

@@ -179,18 +179,6 @@
     return paused;
 }
 
-- (void)beginEqualizer:(AudioUnit)eq {
-    [controller beginEqualizer:eq];
-}
-
-- (void)refreshEqualizer:(AudioUnit)eq {
-    [controller refreshEqualizer:eq];
-}
-
-- (void)endEqualizer:(AudioUnit)eq {
-    [controller endEqualizer:eq];
-}
-
 - (void)sustainHDCD {
     [output sustainHDCD];
 }
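The beginEqualizer:/refreshEqualizer:/endEqualizer: pass-throughs can disappear from OutputNode because the new DSPEqualizerNode reaches the AudioPlayer directly (through its controller, the BufferChain) when it registers a unit with EQHookContainer, so the output node no longer needs to relay those calls.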
CogAudio.xcodeproj/project.pbxproj

@@ -101,6 +101,8 @@
         83B74281289E027F005AAC28 /* CogAudio-Bridging-Header.h in Headers */ = {isa = PBXBuildFile; fileRef = 83B74280289E027F005AAC28 /* CogAudio-Bridging-Header.h */; };
         83F843202D5C6272008C123B /* HeadphoneFilter.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F8431E2D5C6272008C123B /* HeadphoneFilter.h */; };
         83F843212D5C6272008C123B /* HeadphoneFilter.mm in Sources */ = {isa = PBXBuildFile; fileRef = 83F8431F2D5C6272008C123B /* HeadphoneFilter.mm */; };
+        83F843232D5C66DA008C123B /* DSPEqualizerNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 83F843222D5C66DA008C123B /* DSPEqualizerNode.h */; };
+        83F843252D5C66E9008C123B /* DSPEqualizerNode.m in Sources */ = {isa = PBXBuildFile; fileRef = 83F843242D5C66E9008C123B /* DSPEqualizerNode.m */; };
         83FFED512D5B08BC0044CCAF /* DSPNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 83FFED502D5B08BC0044CCAF /* DSPNode.h */; };
         83FFED532D5B09320044CCAF /* DSPNode.m in Sources */ = {isa = PBXBuildFile; fileRef = 83FFED522D5B09320044CCAF /* DSPNode.m */; };
         8DC2EF570486A6940098B216 /* Cocoa.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 1058C7B1FEA5585E11CA2CBB /* Cocoa.framework */; };
@@ -223,6 +225,8 @@
         83B74280289E027F005AAC28 /* CogAudio-Bridging-Header.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "CogAudio-Bridging-Header.h"; sourceTree = "<group>"; };
         83F8431E2D5C6272008C123B /* HeadphoneFilter.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = HeadphoneFilter.h; sourceTree = "<group>"; };
         83F8431F2D5C6272008C123B /* HeadphoneFilter.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = HeadphoneFilter.mm; sourceTree = "<group>"; };
+        83F843222D5C66DA008C123B /* DSPEqualizerNode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = DSPEqualizerNode.h; sourceTree = "<group>"; };
+        83F843242D5C66E9008C123B /* DSPEqualizerNode.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = DSPEqualizerNode.m; sourceTree = "<group>"; };
         83FFED502D5B08BC0044CCAF /* DSPNode.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = DSPNode.h; sourceTree = "<group>"; };
         83FFED522D5B09320044CCAF /* DSPNode.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = DSPNode.m; sourceTree = "<group>"; };
         8DC2EF5A0486A6940098B216 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist; path = Info.plist; sourceTree = "<group>"; };
@@ -538,6 +542,8 @@
         83A3496E2D5C405E0096D530 /* DSPFSurroundNode.m */,
         83A349742D5C50A10096D530 /* DSPHRTFNode.h */,
         83A349762D5C50B20096D530 /* DSPHRTFNode.m */,
+        83F843222D5C66DA008C123B /* DSPEqualizerNode.h */,
+        83F843242D5C66E9008C123B /* DSPEqualizerNode.m */,
         );
         path = DSP;
         sourceTree = "<group>";
@@ -558,6 +564,7 @@
         17D21CA50B8BE4BA00D1EBDE /* InputNode.h in Headers */,
         834A41A9287A90AB00EB9D9B /* freesurround_decoder.h in Headers */,
         835DD2732ACAF5AD0057E319 /* util.h in Headers */,
+        83F843232D5C66DA008C123B /* DSPEqualizerNode.h in Headers */,
         17D21CA70B8BE4BA00D1EBDE /* Node.h in Headers */,
         8399CF2C27B5D1D5008751F1 /* NSDictionary+Merge.h in Headers */,
         17D21CA90B8BE4BA00D1EBDE /* OutputNode.h in Headers */,
@@ -680,6 +687,7 @@
         83A3496A2D5C3F430096D530 /* DSPRubberbandNode.m in Sources */,
         83504166286447DA006B32CC /* Downmix.m in Sources */,
         8399CF2D27B5D1D5008751F1 /* NSDictionary+Merge.m in Sources */,
+        83F843252D5C66E9008C123B /* DSPEqualizerNode.m in Sources */,
         834A41AB287A90AB00EB9D9B /* channelmaps.cpp in Sources */,
         831A50162865A8800049CFE4 /* rsstate.cpp in Sources */,
         17D21CA80B8BE4BA00D1EBDE /* Node.m in Sources */,
OutputCoreAudio.h

@@ -99,19 +99,13 @@ using std::atomic_long;
 
     AUAudioUnit *_au;
 
-    AudioTimeStamp timeStamp;
-
     size_t _bufferSize;
 
-    AudioUnit _eq;
-
     DownmixProcessor *downmixer;
     DownmixProcessor *downmixerForVis;
 
     VisualizationController *visController;
 
-    int inputBufferLastTime;
-
     int inputRemain;
 
     AudioChunk *chunkRemain;
@@ -125,15 +119,12 @@ using std::atomic_long;
     float *samplePtr;
     float tempBuffer[512 * 32];
     float inputBuffer[4096 * 32]; // 4096 samples times maximum supported channel count
-    float eqBuffer[4096 * 32];
-    float eqOutBuffer[4096 * 32];
     float downmixBuffer[4096 * 8];
 
     float visAudio[4096];
     float visResamplerInput[8192];
     float visTemp[8192];
-
 #ifdef OUTPUT_LOG
     FILE *_logFile;
 #endif
@@ -153,8 +144,6 @@ using std::atomic_long;
 
 - (void)setVolume:(double)v;
 
-- (void)setEqualizerEnabled:(BOOL)enabled;
-
 - (void)setShouldPlayOutBuffer:(BOOL)enabled;
 
 - (void)sustainHDCD;
OutputCoreAudio.m

@@ -27,37 +27,6 @@ static NSString *CogPlaybackDidBeginNotificiation = @"CogPlaybackDidBeginNotific
 
 static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;
 
-static void fillBuffers(AudioBufferList *ioData, const float *inbuffer, size_t count, size_t offset) {
-    const size_t channels = ioData->mNumberBuffers;
-    for(int i = 0; i < channels; ++i) {
-        const size_t maxCount = (ioData->mBuffers[i].mDataByteSize / sizeof(float)) - offset;
-        float *output = ((float *)ioData->mBuffers[i].mData) + offset;
-        const float *input = inbuffer + i;
-        cblas_scopy((int)((count > maxCount) ? maxCount : count), input, (int)channels, output, 1);
-        ioData->mBuffers[i].mNumberChannels = 1;
-    }
-}
-
-static void clearBuffers(AudioBufferList *ioData, size_t count, size_t offset) {
-    for(int i = 0; i < ioData->mNumberBuffers; ++i) {
-        memset((uint8_t *)ioData->mBuffers[i].mData + offset * sizeof(float), 0, count * sizeof(float));
-        ioData->mBuffers[i].mNumberChannels = 1;
-    }
-}
-
-static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) {
-    if(inNumberFrames > 4096 || !inRefCon) {
-        clearBuffers(ioData, inNumberFrames, 0);
-        return 0;
-    }
-
-    OutputCoreAudio *_self = (__bridge OutputCoreAudio *)inRefCon;
-
-    fillBuffers(ioData, _self->samplePtr, inNumberFrames, 0);
-
-    return 0;
-}
-
 - (int)renderInput:(int)amountToRead toBuffer:(float *)buffer {
     int amountRead = 0;
 
@@ -228,10 +197,6 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
         }
     }
 
-    if(eqEnabled) {
-        volumeScale *= eqPreamp;
-    }
-
     scale_by_volume(&buffer[0], amountRead * realStreamFormat.mChannelsPerFrame, volumeScale * volume);
 
     return amountRead;
@@ -292,10 +257,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
         NSDictionary *device = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"outputDevice"];
 
         [self setOutputDeviceWithDeviceDict:device];
-    } else if([keyPath isEqualToString:@"values.GraphicEQenable"]) {
-        BOOL enabled = [[[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"GraphicEQenable"] boolValue];
-
-        [self setEqualizerEnabled:enabled];
     } else if([keyPath isEqualToString:@"values.eqPreamp"]) {
         float preamp = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] floatForKey:@"eqPreamp"];
         eqPreamp = pow(10.0, preamp / 20.0);
@@ -645,22 +606,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
         return NO;
 
     [outputController setFormat:&deviceFormat channelConfig:deviceChannelConfig];
 
-    AudioStreamBasicDescription asbd = deviceFormat;
-
-    asbd.mFormatFlags &= ~kAudioFormatFlagIsPacked;
-
-    AudioUnitSetProperty(_eq, kAudioUnitProperty_StreamFormat,
-                         kAudioUnitScope_Input, 0, &asbd, sizeof(asbd));
-
-    AudioUnitSetProperty(_eq, kAudioUnitProperty_StreamFormat,
-                         kAudioUnitScope_Output, 0, &asbd, sizeof(asbd));
-    AudioUnitReset(_eq, kAudioUnitScope_Input, 0);
-    AudioUnitReset(_eq, kAudioUnitScope_Output, 0);
-
-    AudioUnitReset(_eq, kAudioUnitScope_Global, 0);
-
-    eqEnabled = [[[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"GraphicEQenable"] boolValue];
     }
 
     return YES;
@@ -720,45 +665,11 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
         layout.mChannelLayoutTag = kAudioChannelLayoutTag_UseChannelBitmap;
         layout.mChannelBitmap = streamChannelConfig;
     }
 
-    if(eqInitialized) {
-        AudioUnitUninitialize(_eq);
-        eqInitialized = NO;
-    }
-
-    AudioStreamBasicDescription asbd = streamFormat;
-
-    // Of course, non-interleaved has only one sample per frame/packet, per buffer
-    asbd.mFormatFlags |= kAudioFormatFlagIsNonInterleaved;
-    asbd.mBytesPerFrame = sizeof(float);
-    asbd.mBytesPerPacket = sizeof(float);
-    asbd.mFramesPerPacket = 1;
-
-    UInt32 maximumFrames = 4096;
-    AudioUnitSetProperty(_eq, kAudioUnitProperty_MaximumFramesPerSlice, kAudioUnitScope_Global, 0, &maximumFrames, sizeof(maximumFrames));
-
-    AudioUnitSetProperty(_eq, kAudioUnitProperty_StreamFormat,
-                         kAudioUnitScope_Input, 0, &asbd, sizeof(asbd));
-
-    AudioUnitSetProperty(_eq, kAudioUnitProperty_StreamFormat,
-                         kAudioUnitScope_Output, 0, &asbd, sizeof(asbd));
-    AudioUnitReset(_eq, kAudioUnitScope_Input, 0);
-    AudioUnitReset(_eq, kAudioUnitScope_Output, 0);
-
-    AudioUnitReset(_eq, kAudioUnitScope_Global, 0);
-
-    if(AudioUnitInitialize(_eq) != noErr) {
-        eqEnabled = NO;
-        return;
-    }
-    eqInitialized = YES;
-
-    eqEnabled = [[[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"GraphicEQenable"] boolValue];
 }
 
 - (int)renderAndConvert {
     OSStatus status;
-    int inputRendered = inputBufferLastTime;
+    int inputRendered = 0;
     int bytesRendered = inputRendered * realStreamFormat.mBytesPerPacket;
 
     if(resetStreamFormat) {
@@ -788,42 +699,11 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
         if([self processEndOfStream]) break;
     }
 
-    inputBufferLastTime = inputRendered;
-
     int samplesRendered = inputRendered;
 
     samplePtr = &inputBuffer[0];
 
     if(samplesRendered) {
-        if(eqEnabled && eqInitialized) {
-            const int channels = streamFormat.mChannelsPerFrame;
-            if(channels > 0) {
-                const size_t channelsminusone = channels - 1;
-                uint8_t tempBuffer[sizeof(AudioBufferList) + sizeof(AudioBuffer) * channelsminusone];
-                AudioBufferList *ioData = (AudioBufferList *)&tempBuffer[0];
-
-                ioData->mNumberBuffers = channels;
-                for(size_t i = 0; i < channels; ++i) {
-                    ioData->mBuffers[i].mData = &eqBuffer[4096 * i];
-                    ioData->mBuffers[i].mDataByteSize = samplesRendered * sizeof(float);
-                    ioData->mBuffers[i].mNumberChannels = 1;
-                }
-
-                status = AudioUnitRender(_eq, NULL, &timeStamp, 0, samplesRendered, ioData);
-
-                if(status != noErr) {
-                    return 0;
-                }
-
-                timeStamp.mSampleTime += ((double)samplesRendered) / streamFormat.mSampleRate;
-
-                for(int i = 0; i < channels; ++i) {
-                    cblas_scopy(samplesRendered, &eqBuffer[4096 * i], 1, &eqOutBuffer[i], channels);
-                }
-                samplePtr = &eqOutBuffer[0];
-            }
-        }
-
         if(downmixer) {
             [downmixer process:samplePtr frameCount:samplesRendered output:&downmixBuffer[0]];
             samplePtr = &downmixBuffer[0];
@@ -836,8 +716,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 #endif
     }
 
-    inputBufferLastTime = 0;
-
     return samplesRendered;
 }
 
@@ -907,8 +785,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
     streamFormatChanged = NO;
     streamFormatStarted = NO;
 
-    inputBufferLastTime = 0;
-
     running = NO;
     stopping = NO;
     stopped = NO;
@@ -960,50 +836,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
         [self setOutputDeviceWithDeviceDict:nil];
     }
 
-    AudioComponent comp = NULL;
-
-    desc.componentType = kAudioUnitType_Effect;
-    desc.componentSubType = kAudioUnitSubType_GraphicEQ;
-
-    comp = AudioComponentFindNext(comp, &desc);
-    if(!comp)
-        return NO;
-
-    OSStatus _err = AudioComponentInstanceNew(comp, &_eq);
-    if(err)
-        return NO;
-
-    UInt32 value;
-    UInt32 size = sizeof(value);
-
-    value = CHUNK_SIZE;
-    AudioUnitSetProperty(_eq, kAudioUnitProperty_MaximumFramesPerSlice,
-                         kAudioUnitScope_Global, 0, &value, size);
-
-    value = 127;
-    AudioUnitSetProperty(_eq, kAudioUnitProperty_RenderQuality,
-                         kAudioUnitScope_Global, 0, &value, size);
-
-    AURenderCallbackStruct callbackStruct;
-    callbackStruct.inputProcRefCon = (__bridge void *)self;
-    callbackStruct.inputProc = eqRenderCallback;
-    AudioUnitSetProperty(_eq, kAudioUnitProperty_SetRenderCallback,
-                         kAudioUnitScope_Input, 0, &callbackStruct, sizeof(callbackStruct));
-
-    AudioUnitReset(_eq, kAudioUnitScope_Input, 0);
-    AudioUnitReset(_eq, kAudioUnitScope_Output, 0);
-
-    AudioUnitReset(_eq, kAudioUnitScope_Global, 0);
-
-    _err = AudioUnitInitialize(_eq);
-    if(_err)
-        return NO;
-
-    eqInitialized = YES;
-
-    [self setEqualizerEnabled:[[[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"GraphicEQenable"] boolValue]];
-
-    [outputController beginEqualizer:_eq];
-
     [self audioOutputBlock];
 
@@ -1014,15 +846,11 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
     visController = [VisualizationController sharedController];
 
     [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.outputDevice" options:0 context:kOutputCoreAudioContext];
-    [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.GraphicEQenable" options:0 context:kOutputCoreAudioContext];
     [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.eqPreamp" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kOutputCoreAudioContext];
     [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.tempo" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kOutputCoreAudioContext];
 
     observersapplied = YES;
 
-    bzero(&timeStamp, sizeof(timeStamp));
-    timeStamp.mFlags = kAudioTimeStampSampleTimeValid;
-
     return (err == nil);
 }
 }
@@ -1045,18 +873,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
     volume = v * 0.01f;
 }
 
-- (void)setEqualizerEnabled:(BOOL)enabled {
-    if(enabled && !eqEnabled) {
-        if(_eq) {
-            AudioUnitReset(_eq, kAudioUnitScope_Input, 0);
-            AudioUnitReset(_eq, kAudioUnitScope_Output, 0);
-            AudioUnitReset(_eq, kAudioUnitScope_Global, 0);
-        }
-    }
-
-    eqEnabled = enabled;
-}
-
 - (double)latency {
     if(secondsLatency > 0) return secondsLatency;
     else return 0;
@@ -1079,8 +895,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
     stopInvoked = YES;
     if(observersapplied) {
         [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.outputDevice" context:kOutputCoreAudioContext];
-        [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.GraphicEQenable" context:kOutputCoreAudioContext];
-        [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.eqPreamp" context:kOutputCoreAudioContext];
         [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.tempo" context:kOutputCoreAudioContext];
         observersapplied = NO;
     }
@@ -1128,15 +942,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
             usleep(5000);
         }
     }
-    if(_eq) {
-        [outputController endEqualizer:_eq];
-        if(eqInitialized) {
-            AudioUnitUninitialize(_eq);
-            eqInitialized = NO;
-        }
-        AudioComponentInstanceDispose(_eq);
-        _eq = NULL;
-    }
     if(downmixerForVis) {
         downmixerForVis = nil;
     }
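Taken together, the OutputCoreAudio changes are almost entirely removals: the _eq unit with its setup and teardown, the render timeStamp, the eqBuffer/eqOutBuffer scratch space, the inputBufferLastTime carry-over, setEqualizerEnabled:, and the GraphicEQenable observer all drop out of the output code, and renderInput: no longer multiplies eqPreamp into the volume scale, since DSPEqualizerNode now applies the preamp to its own output.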