Output: Move HRTF and downmix to very end of chain
Some checks failed
Check if Cog buildable / Build Universal Cog.app (push) Has been cancelled
Make the output device a node as well, so that its buffer can be pulled from by the HRTF and downmix nodes, which in turn feed the output callback, reducing the head-tracking latency as much as possible.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
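In the new topology, OutputCoreAudio itself becomes a Node whose buffer terminates the main DSP chain; the HRTF and downmix stages pull from it, and the CoreAudio render callback drains the downmix node's buffer directly. A condensed sketch of that wiring, using only names that appear in the diff below (surrounding method bodies abridged, not the full implementation):

	// OutputCoreAudio is now a Node subclass, so the late DSP stages can
	// treat it as their "previous" node and pull from its buffer.
	hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:self latency:0.03];
	downmixNode = [[DSPDownmixNode alloc] initWithController:self previous:hrtfNode latency:0.03];

	// The device render callback then consumes the downmix node's output,
	// so HRTF head-tracking state is applied as close to playback as possible.
	AudioChunk *chunk = [[downmixNode buffer] removeSamples:frameCount - renderedSamples];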
This commit is contained in:
parent 8ecad92dc5
commit 93739d5a0d
3 changed files with 74 additions and 36 deletions
OutputNode.m
@@ -13,10 +13,8 @@

 #import "DSPRubberbandNode.h"
 #import "DSPFSurroundNode.h"
-#import "DSPHRTFNode.h"
 #import "DSPEqualizerNode.h"
 #import "VisualizationNode.h"
-#import "DSPDownmixNode.h"

 #import "Logging.h"

@@ -27,9 +25,7 @@

 	DSPRubberbandNode *rubberbandNode;
 	DSPFSurroundNode *fsurroundNode;
-	DSPHRTFNode *hrtfNode;
 	DSPEqualizerNode *equalizerNode;
-	DSPDownmixNode *downmixNode;
 	VisualizationNode *visualizationNode;
 }

@@ -61,13 +57,9 @@
 	if(!fsurroundNode) return NO;
 	equalizerNode = [[DSPEqualizerNode alloc] initWithController:self previous:fsurroundNode latency:0.03];
 	if(!equalizerNode) return NO;
-	hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:equalizerNode latency:0.03];
-	if(!hrtfNode) return NO;
-	downmixNode = [[DSPDownmixNode alloc] initWithController:self previous:hrtfNode latency:0.03];
-	if(!downmixNode) return NO;

 	// Approximately double the chunk size for Vis at 44100Hz
-	visualizationNode = [[VisualizationNode alloc] initWithController:self previous:downmixNode latency:8192.0 / 44100.0];
+	visualizationNode = [[VisualizationNode alloc] initWithController:self previous:equalizerNode latency:8192.0 / 44100.0];
 	if(!visualizationNode) return NO;

 	[self setPreviousNode:visualizationNode];

@@ -171,7 +163,7 @@

 - (NSArray *)DSPs {
 	if(DSPsLaunched) {
-		return @[rubberbandNode, fsurroundNode, equalizerNode, hrtfNode, downmixNode, visualizationNode];
+		return @[rubberbandNode, fsurroundNode, equalizerNode, visualizationNode];
 	} else {
 		return @[];
 	}

@@ -288,7 +280,11 @@
 			formatChanged = YES;
 		}
 	}
-	if(downmixNode && output && !formatChanged) {
+	DSPDownmixNode *downmixNode = nil;
+	if(output) {
+		downmixNode = [output downmix];
+	}
+	if(downmixNode && !formatChanged) {
 		outputFormat = [output deviceFormat];
 		outputChannelConfig = [output deviceChannelConfig];
 		AudioStreamBasicDescription currentOutputFormat = [downmixNode nodeFormat];

@@ -303,7 +299,7 @@
 	if(converter) {
 		[converter setOutputFormat:format];
 	}
-	if(downmixNode && output) {
+	if(downmixNode) {
 		[downmixNode setOutputFormat:[output deviceFormat] withChannelConfig:[output deviceChannelConfig]];
 	}
 	if(inputNode) {

@@ -327,8 +323,6 @@
 	}
 	previousNode = nil;
 	visualizationNode = nil;
-	downmixNode = nil;
-	hrtfNode = nil;
 	fsurroundNode = nil;
 	rubberbandNode = nil;
 	previousInput = nil;

@@ -393,7 +387,7 @@
 }

 - (id)downmix {
-	return downmixNode;
+	return [output downmix];
 }

 @end
OutputCoreAudio.h
@@ -25,7 +25,11 @@ using std::atomic_long;
 #import <simd/simd.h>

 #import <CogAudio/ChunkList.h>
-#import <CogAudio/HeadphoneFilter.h>
+#import <CogAudio/Node.h>
+
+#import <CogAudio/DSPDownmixNode.h>
+#import <CogAudio/DSPHRTFNode.h>
+

 //#define OUTPUT_LOG

@@ -33,12 +37,9 @@ using std::atomic_long;

 @class AudioChunk;

-@interface OutputCoreAudio : NSObject {
+@interface OutputCoreAudio : Node {
 	OutputNode *outputController;

-	dispatch_semaphore_t writeSemaphore;
-	dispatch_semaphore_t readSemaphore;
-
 	NSLock *outputLock;

 	double streamTimestamp;

@@ -96,7 +97,9 @@ using std::atomic_long;

 	BOOL shouldPlayOutBuffer;

-	ChunkList *outputBuffer;
+	BOOL DSPsLaunched;
+	DSPHRTFNode *hrtfNode;
+	DSPDownmixNode *downmixNode;

 #ifdef OUTPUT_LOG
 	NSFileHandle *_logFile;

@@ -129,4 +132,6 @@ using std::atomic_long;
 - (AudioStreamBasicDescription)deviceFormat;
 - (uint32_t)deviceChannelConfig;

+- (DSPDownmixNode *)downmix;
+
 @end
OutputCoreAudio.m
@@ -133,6 +133,10 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;
 - (id)initWithController:(OutputNode *)c {
 	self = [super init];
 	if(self) {
+		buffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
+		writeSemaphore = [[Semaphore alloc] init];
+		readSemaphore = [[Semaphore alloc] init];
+
 		outputController = c;
 		volume = 1.0;
 		outputDeviceID = -1;

@@ -208,6 +212,26 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 	return NO;
 }

+- (NSArray *)DSPs {
+	if(DSPsLaunched) {
+		return @[hrtfNode, downmixNode];
+	} else {
+		return @[];
+	}
+}
+
+- (DSPDownmixNode *)downmix {
+	return downmixNode;
+}
+
+- (void)launchDSPs {
+	NSArray *DSPs = [self DSPs];
+
+	for (Node *node in DSPs) {
+		[node launchThread];
+	}
+}
+
 - (void)threadEntry:(id)arg {
 	@autoreleasepool {
 		NSThread *currentThread = [NSThread currentThread];

@@ -236,14 +260,15 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 				[outputLock lock];
 				started = NO;
 				restarted = NO;
-				[outputBuffer reset];
+				[buffer reset];
+				[self setShouldReset:YES];
 				[outputLock unlock];
 			}

 			if(stopping)
 				break;

-			if(!cutOffInput && ![outputBuffer isFull]) {
+			if(!cutOffInput && ![buffer isFull]) {
 				[self renderAndConvert];
 				rendered = YES;
 			} else {

@@ -556,7 +581,8 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 	[outputController setFormat:&deviceFormat channelConfig:deviceChannelConfig];

 	[outputLock lock];
-	[outputBuffer reset];
+	[buffer reset];
+	[self setShouldReset:YES];
 	[outputLock unlock];

 	if(started) {

@@ -639,8 +665,9 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 	size_t frameCount = 0;
 	if(chunk && (frameCount = [chunk frameCount])) {
 		[outputLock lock];
-		[outputBuffer addChunk:chunk];
+		[buffer addChunk:chunk];
 		[outputLock unlock];
+		[readSemaphore signal];
 	}

 	if(streamFormatChanged) {

@@ -691,8 +718,8 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 	while(renderedSamples < frameCount) {
 		[refLock lock];
 		AudioChunk *chunk = nil;
-		if(_self->outputBuffer && ![_self->outputBuffer isEmpty]) {
-			chunk = [_self->outputBuffer removeSamples:frameCount - renderedSamples];
+		if(![_self->downmixNode.buffer isEmpty]) {
+			chunk = [_self->downmixNode.buffer removeSamples:frameCount - renderedSamples];
 		}
 		[refLock unlock];

@@ -827,15 +854,19 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons

 		visController = [VisualizationController sharedController];

+		hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:self latency:0.03];
+		downmixNode = [[DSPDownmixNode alloc] initWithController:self previous:hrtfNode latency:0.03];
+
+		[self setShouldContinue:YES];
+		[self setEndOfStream:NO];
+
+		DSPsLaunched = YES;
+		[self launchDSPs];
+
 		[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.outputDevice" options:0 context:kOutputCoreAudioContext];

 		observersapplied = YES;

-		outputBuffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
-		if(!outputBuffer) {
-			return NO;
-		}
-
 		return (err == nil);
 	}
 }

@@ -857,7 +888,7 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 }

 - (double)latency {
-	return [outputBuffer listDuration];
+	return [buffer listDuration] + [[hrtfNode buffer] listDuration] + [[downmixNode buffer] listDuration];
 }

 - (void)start {

@@ -932,6 +963,14 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 			usleep(5000);
 		}
 	}
+	if(DSPsLaunched) {
+		[self setShouldContinue:NO];
+		[hrtfNode setShouldContinue:NO];
+		[downmixNode setShouldContinue:NO];
+		hrtfNode = nil;
+		downmixNode = nil;
+		DSPsLaunched = NO;
+	}
 #ifdef OUTPUT_LOG
 	if(_logFile) {
 		[_logFile closeFile];

@@ -995,9 +1034,9 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 	cutOffInput = YES;
 	[outputLock lock];
 	[fadedBuffersLock lock];
-	FadedBuffer *buffer = [[FadedBuffer alloc] initWithBuffer:outputBuffer fadeTarget:0.0 sampleRate:deviceFormat.mSampleRate];
-	outputBuffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
-	[fadedBuffers addObject:buffer];
+	FadedBuffer *fbuffer = [[FadedBuffer alloc] initWithBuffer:buffer fadeTarget:0.0 sampleRate:deviceFormat.mSampleRate];
+	buffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
+	[fadedBuffers addObject:fbuffer];
 	[fadedBuffersLock unlock];
 	[outputLock unlock];
 	cutOffInput = NO;