Compare commits

4 commits: 5aec42d518 ... 0494ef981c

| Author | SHA1 | Date |
|---|---|---|
| | 0494ef981c | |
| | aeb312121f | |
| | e5e4b4791c | |
| | bb84d75d73 | |

3 changed files with 36 additions and 74 deletions
@@ -13,8 +13,10 @@
#import "DSPRubberbandNode.h"
#import "DSPFSurroundNode.h"
+#import "DSPHRTFNode.h"
#import "DSPEqualizerNode.h"
#import "VisualizationNode.h"
+#import "DSPDownmixNode.h"

#import "Logging.h"

@@ -25,7 +27,9 @@
DSPRubberbandNode *rubberbandNode;
DSPFSurroundNode *fsurroundNode;
+DSPHRTFNode *hrtfNode;
DSPEqualizerNode *equalizerNode;
+DSPDownmixNode *downmixNode;
VisualizationNode *visualizationNode;
}

@@ -57,9 +61,13 @@
if(!fsurroundNode) return NO;
equalizerNode = [[DSPEqualizerNode alloc] initWithController:self previous:fsurroundNode latency:0.03];
if(!equalizerNode) return NO;
+hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:equalizerNode latency:0.03];
+if(!hrtfNode) return NO;
+downmixNode = [[DSPDownmixNode alloc] initWithController:self previous:hrtfNode latency:0.03];
+if(!downmixNode) return NO;

// Approximately double the chunk size for Vis at 44100Hz
-visualizationNode = [[VisualizationNode alloc] initWithController:self previous:equalizerNode latency:8192.0 / 44100.0];
+visualizationNode = [[VisualizationNode alloc] initWithController:self previous:downmixNode latency:8192.0 / 44100.0];
if(!visualizationNode) return NO;

[self setPreviousNode:visualizationNode];

@@ -163,7 +171,7 @@
- (NSArray *)DSPs {
if(DSPsLaunched) {
-return @[rubberbandNode, fsurroundNode, equalizerNode, visualizationNode];
+return @[rubberbandNode, fsurroundNode, equalizerNode, hrtfNode, downmixNode, visualizationNode];
} else {
return @[];
}

@@ -280,11 +288,7 @@
formatChanged = YES;
}
}
-DSPDownmixNode *downmixNode = nil;
-if(output) {
-downmixNode = [output downmix];
-}
-if(downmixNode && !formatChanged) {
+if(downmixNode && output && !formatChanged) {
outputFormat = [output deviceFormat];
outputChannelConfig = [output deviceChannelConfig];
AudioStreamBasicDescription currentOutputFormat = [downmixNode nodeFormat];

@@ -299,7 +303,7 @@
if(converter) {
[converter setOutputFormat:format];
}
-if(downmixNode) {
+if(downmixNode && output) {
[downmixNode setOutputFormat:[output deviceFormat] withChannelConfig:[output deviceChannelConfig]];
}
if(inputNode) {

@@ -323,6 +327,8 @@
}
previousNode = nil;
visualizationNode = nil;
+downmixNode = nil;
+hrtfNode = nil;
fsurroundNode = nil;
rubberbandNode = nil;
previousInput = nil;

@@ -387,7 +393,7 @@
}

- (id)downmix {
-return [output downmix];
+return downmixNode;
}

@end

@@ -25,11 +25,7 @@ using std::atomic_long;
#import <simd/simd.h>

#import <CogAudio/ChunkList.h>

#import <CogAudio/Node.h>

-#import <CogAudio/DSPDownmixNode.h>
-#import <CogAudio/DSPHRTFNode.h>
-#import <CogAudio/HeadphoneFilter.h>

//#define OUTPUT_LOG

@@ -37,9 +33,12 @@

@class AudioChunk;

-@interface OutputCoreAudio : Node {
+@interface OutputCoreAudio : NSObject {
OutputNode *outputController;

+dispatch_semaphore_t writeSemaphore;
+dispatch_semaphore_t readSemaphore;

NSLock *outputLock;

double streamTimestamp;

@@ -97,9 +96,7 @@

BOOL shouldPlayOutBuffer;

-BOOL DSPsLaunched;
-DSPHRTFNode *hrtfNode;
-DSPDownmixNode *downmixNode;
+ChunkList *outputBuffer;

#ifdef OUTPUT_LOG
NSFileHandle *_logFile;

@@ -132,6 +129,4 @@
- (AudioStreamBasicDescription)deviceFormat;
- (uint32_t)deviceChannelConfig;

-- (DSPDownmixNode *)downmix;

@end

@@ -133,10 +133,6 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;
- (id)initWithController:(OutputNode *)c {
self = [super init];
if(self) {
-buffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
-writeSemaphore = [[Semaphore alloc] init];
-readSemaphore = [[Semaphore alloc] init];

outputController = c;
volume = 1.0;
outputDeviceID = -1;

@@ -212,26 +208,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
return NO;
}

-- (NSArray *)DSPs {
-if(DSPsLaunched) {
-return @[hrtfNode, downmixNode];
-} else {
-return @[];
-}
-}

-- (DSPDownmixNode *)downmix {
-return downmixNode;
-}

-- (void)launchDSPs {
-NSArray *DSPs = [self DSPs];

-for (Node *node in DSPs) {
-[node launchThread];
-}
-}

- (void)threadEntry:(id)arg {
@autoreleasepool {
NSThread *currentThread = [NSThread currentThread];

@@ -260,15 +236,14 @@
[outputLock lock];
started = NO;
restarted = NO;
-[buffer reset];
-[self setShouldReset:YES];
+[outputBuffer reset];
[outputLock unlock];
}

if(stopping)
break;

-if(!cutOffInput && ![buffer isFull]) {
+if(!cutOffInput && ![outputBuffer isFull]) {
[self renderAndConvert];
rendered = YES;
} else {

@@ -581,8 +556,7 @@
[outputController setFormat:&deviceFormat channelConfig:deviceChannelConfig];

[outputLock lock];
-[buffer reset];
-[self setShouldReset:YES];
+[outputBuffer reset];
[outputLock unlock];

if(started) {

@@ -665,9 +639,8 @@
size_t frameCount = 0;
if(chunk && (frameCount = [chunk frameCount])) {
[outputLock lock];
-[buffer addChunk:chunk];
+[outputBuffer addChunk:chunk];
[outputLock unlock];
[readSemaphore signal];
}

if(streamFormatChanged) {

@@ -718,8 +691,8 @@
while(renderedSamples < frameCount) {
[refLock lock];
AudioChunk *chunk = nil;
-if(![_self->downmixNode.buffer isEmpty]) {
-chunk = [self->downmixNode.buffer removeSamples:frameCount - renderedSamples];
+if(_self->outputBuffer && ![_self->outputBuffer isEmpty]) {
+chunk = [_self->outputBuffer removeSamples:frameCount - renderedSamples];
}
[refLock unlock];

@@ -854,19 +827,15 @@

visController = [VisualizationController sharedController];

-hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:self latency:0.03];
-downmixNode = [[DSPDownmixNode alloc] initWithController:self previous:hrtfNode latency:0.03];

-[self setShouldContinue:YES];
-[self setEndOfStream:NO];

-DSPsLaunched = YES;
-[self launchDSPs];

[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.outputDevice" options:0 context:kOutputCoreAudioContext];

observersapplied = YES;

+outputBuffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
+if(!outputBuffer) {
+return NO;
+}

return (err == nil);
}
}

@@ -888,7 +857,7 @@
}

- (double)latency {
-return [buffer listDuration] + [[hrtfNode buffer] listDuration] + [[downmixNode buffer] listDuration];
+return [outputBuffer listDuration];
}

- (void)start {

@@ -963,14 +932,6 @@
usleep(5000);
}
}
-if(DSPsLaunched) {
-[self setShouldContinue:NO];
-[hrtfNode setShouldContinue:NO];
-[downmixNode setShouldContinue:NO];
-hrtfNode = nil;
-downmixNode = nil;
-DSPsLaunched = NO;
-}
#ifdef OUTPUT_LOG
if(_logFile) {
[_logFile closeFile];

@@ -1034,9 +995,9 @@
cutOffInput = YES;
[outputLock lock];
[fadedBuffersLock lock];
-FadedBuffer *fbuffer = [[FadedBuffer alloc] initWithBuffer:buffer fadeTarget:0.0 sampleRate:deviceFormat.mSampleRate];
-buffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
-[fadedBuffers addObject:fbuffer];
+FadedBuffer *buffer = [[FadedBuffer alloc] initWithBuffer:outputBuffer fadeTarget:0.0 sampleRate:deviceFormat.mSampleRate];
+outputBuffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
+[fadedBuffers addObject:buffer];
[fadedBuffersLock unlock];
[outputLock unlock];
cutOffInput = NO;
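
Read together, these commits move the HRTF and downmix stages out of OutputCoreAudio and into the owning node's DSP chain, and have OutputCoreAudio drain a plain outputBuffer instead of pulling from the downmix node directly. For orientation, a consolidated sketch of the chain construction as it reads after the change, using only lines that appear in the hunks above (the rubberband and FreeSurround nodes are created just above this excerpt, outside the hunk's context lines):

// Resulting DSP chain: rubberband -> FreeSurround -> equalizer -> HRTF -> downmix -> visualization
equalizerNode = [[DSPEqualizerNode alloc] initWithController:self previous:fsurroundNode latency:0.03];
if(!equalizerNode) return NO;
hrtfNode = [[DSPHRTFNode alloc] initWithController:self previous:equalizerNode latency:0.03];
if(!hrtfNode) return NO;
downmixNode = [[DSPDownmixNode alloc] initWithController:self previous:hrtfNode latency:0.03];
if(!downmixNode) return NO;

// Approximately double the chunk size for Vis at 44100Hz
visualizationNode = [[VisualizationNode alloc] initWithController:self previous:downmixNode latency:8192.0 / 44100.0];
if(!visualizationNode) return NO;

[self setPreviousNode:visualizationNode];

Because the visualization tap now follows the downmix node rather than the equalizer, it observes audio after conversion toward the device's output layout.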