diff --git a/Audio/Chain/AudioChunk.h b/Audio/Chain/AudioChunk.h
index d1a35c41b..72b4dd72c 100644
--- a/Audio/Chain/AudioChunk.h
+++ b/Audio/Chain/AudioChunk.h
@@ -87,7 +87,7 @@ enum {
 - (id)init;
 - (id)initWithProperties:(NSDictionary *)properties;
 
-- (void)assignSamples:(const void *)data frameCount:(size_t)count;
+- (void)assignSamples:(const void *_Nonnull)data frameCount:(size_t)count;
 - (void)assignData:(NSData *)data;
 
 - (NSData *)removeSamples:(size_t)frameCount;
diff --git a/Audio/Chain/AudioChunk.m b/Audio/Chain/AudioChunk.m
index 0cfee6bf2..6bde9f93f 100644
--- a/Audio/Chain/AudioChunk.m
+++ b/Audio/Chain/AudioChunk.m
@@ -159,7 +159,7 @@ static const uint32_t AudioChannelConfigTable[] = {
 	channelConfig = config;
 }
 
-- (void)assignSamples:(const void *)data frameCount:(size_t)count {
+- (void)assignSamples:(const void *_Nonnull)data frameCount:(size_t)count {
 	if(formatAssigned) {
 		const size_t bytesPerPacket = format.mBytesPerPacket;
 		[chunkData appendBytes:data length:bytesPerPacket * count];
diff --git a/Audio/Chain/ChunkList.h b/Audio/Chain/ChunkList.h
index cdc45300a..e42dcf213 100644
--- a/Audio/Chain/ChunkList.h
+++ b/Audio/Chain/ChunkList.h
@@ -25,6 +25,8 @@ NS_ASSUME_NONNULL_BEGIN
 	BOOL inAdder;
 	BOOL inRemover;
 	BOOL inPeeker;
+	BOOL inMerger;
+	BOOL inConverter;
 	BOOL stopping;
 
 	// For format converter
diff --git a/Audio/Chain/ChunkList.m b/Audio/Chain/ChunkList.m
index 6d8bfd91e..621e4101c 100644
--- a/Audio/Chain/ChunkList.m
+++ b/Audio/Chain/ChunkList.m
@@ -384,6 +384,8 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 		inAdder = NO;
 		inRemover = NO;
 		inPeeker = NO;
+		inMerger = NO;
+		inConverter = NO;
 		stopping = NO;
 
 		formatRead = NO;
@@ -407,7 +409,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 
 - (void)dealloc {
 	stopping = YES;
-	while(inAdder || inRemover || inPeeker) {
+	while(inAdder || inRemover || inPeeker || inMerger || inConverter) {
 		usleep(500);
 	}
 	if(hdcd_decoder) {
@@ -456,7 +458,9 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 }
 
 - (BOOL)isFull {
-	return (maxDuration - listDuration) < 0.05;
+	@synchronized (chunkList) {
+		return (maxDuration - listDuration) < 0.05;
+	}
 }
 
 - (void)addChunk:(AudioChunk *)chunk {
@@ -515,7 +519,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 	if(stopping) {
 		return [[AudioChunk alloc] init];
 	}
-	
+
 	@synchronized (chunkList) {
 		inRemover = YES;
 		if(![chunkList count]) {
@@ -553,13 +557,20 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 }
 
 - (AudioChunk *)removeAndMergeSamples:(size_t)maxFrameCount {
+	if(stopping) {
+		return [[AudioChunk alloc] init];
+	}
+
+	inMerger = YES;
+
 	BOOL formatSet = NO;
 	AudioStreamBasicDescription currentFormat;
 	uint32_t currentChannelConfig = 0;
 	double streamTimestamp = 0.0;
 	double streamTimeRatio = 1.0;
 
-	if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
+	if (![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
+		inMerger = NO;
 		return [[AudioChunk alloc] init];
 	}
 
@@ -570,11 +581,12 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 	[outputChunk setStreamTimestamp:streamTimestamp];
 	[outputChunk setStreamTimeRatio:streamTimeRatio];
 
-	while(totalFrameCount < maxFrameCount) {
+	while(!stopping && totalFrameCount < maxFrameCount) {
 		AudioStreamBasicDescription newFormat;
 		uint32_t newChannelConfig;
 		if(![self peekFormat:&newFormat channelConfig:&newChannelConfig]) {
-			break;
+			usleep(500);
+			continue;
 		}
 
 		if(formatSet && (memcmp(&newFormat, &currentFormat, sizeof(newFormat)) != 0 ||
@@ -589,8 +601,9 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 		}
 
 		chunk = [self removeSamples:maxFrameCount - totalFrameCount];
-		if(![chunk duration]) {
-			break;
+		if(!chunk || ![chunk frameCount]) {
+			usleep(500);
+			continue;
 		}
 
 		if([chunk isHDCD]) {
@@ -606,9 +619,11 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 	}
 
 	if(!totalFrameCount) {
+		inMerger = NO;
 		return [[AudioChunk alloc] init];
 	}
 
+	inMerger = NO;
 	return outputChunk;
 }
 
@@ -618,10 +633,15 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 }
 
 - (AudioChunk *)convertChunk:(AudioChunk *)inChunk {
+	if(stopping) return [[AudioChunk alloc] init];
+
+	inConverter = YES;
+
 	AudioStreamBasicDescription chunkFormat = [inChunk format];
 	if(![inChunk duration] ||
 	   (chunkFormat.mFormatFlags == kAudioFormatFlagsNativeFloatPacked &&
 	    chunkFormat.mBitsPerChannel == 32)) {
+		inConverter = NO;
 		return inChunk;
 	}
 
@@ -635,8 +655,10 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 	inputLossless = chunkLossless;
 
 	BOOL isFloat = !!(inputFormat.mFormatFlags & kAudioFormatFlagIsFloat);
-	if((!isFloat && !(inputFormat.mBitsPerChannel >= 1 && inputFormat.mBitsPerChannel <= 32)) || (isFloat && !(inputFormat.mBitsPerChannel == 32 || inputFormat.mBitsPerChannel == 64)))
+	if((!isFloat && !(inputFormat.mBitsPerChannel >= 1 && inputFormat.mBitsPerChannel <= 32)) || (isFloat && !(inputFormat.mBitsPerChannel == 32 || inputFormat.mBitsPerChannel == 64))) {
+		inConverter = NO;
 		return [[AudioChunk alloc] init];
+	}
 
 	// These are really placeholders, as we're doing everything internally now
 	if(inputLossless &&
@@ -684,6 +706,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 	NSUInteger samplesRead = [inChunk frameCount];
 
 	if(!samplesRead) {
+		inConverter = NO;
 		return [[AudioChunk alloc] init];
 	}
 
@@ -864,35 +887,42 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 	if(hdcdSustained) [outChunk setHDCD];
 
 	[outChunk assignSamples:inputBuffer frameCount:bytesReadFromInput / floatFormat.mBytesPerPacket];
-	
+
+	inConverter = NO;
 	return outChunk;
 }
 
 - (BOOL)peekFormat:(AudioStreamBasicDescription *)format channelConfig:(uint32_t *)config {
 	if(stopping) return NO;
+	inPeeker = YES;
 	@synchronized(chunkList) {
 		if([chunkList count]) {
 			AudioChunk *chunk = [chunkList objectAtIndex:0];
 			*format = [chunk format];
 			*config = [chunk channelConfig];
+			inPeeker = NO;
 			return YES;
 		}
 	}
+	inPeeker = NO;
 	return NO;
 }
 
 - (BOOL)peekTimestamp:(double *)timestamp timeRatio:(double *)timeRatio {
 	if(stopping) return NO;
+	inPeeker = YES;
 	@synchronized (chunkList) {
 		if([chunkList count]) {
 			AudioChunk *chunk = [chunkList objectAtIndex:0];
 			*timestamp = [chunk streamTimestamp];
 			*timeRatio = [chunk streamTimeRatio];
+			inPeeker = NO;
 			return YES;
 		}
 	}
 	*timestamp = 0.0;
 	*timeRatio = 1.0;
+	inPeeker = NO;
 	return NO;
 }
diff --git a/Audio/Chain/ConverterNode.m b/Audio/Chain/ConverterNode.m
index 27970a34c..906172848 100644
--- a/Audio/Chain/ConverterNode.m
+++ b/Audio/Chain/ConverterNode.m
@@ -101,7 +101,7 @@ void scale_by_volume(float *buffer, size_t count, float volume) {
 		@autoreleasepool {
 			AudioChunk *chunk = nil;
 			chunk = [self convert];
-			if(!chunk || ![chunk duration]) {
+			if(!chunk || ![chunk frameCount]) {
 				if([self endOfStream] == YES) {
 					break;
 				}
diff --git a/Audio/Chain/DSP/DSPEqualizerNode.m b/Audio/Chain/DSP/DSPEqualizerNode.m
index 44858ac5a..55ac0a4c9 100644
--- a/Audio/Chain/DSP/DSPEqualizerNode.m
+++ b/Audio/Chain/DSP/DSPEqualizerNode.m
@@ -299,7 +299,7 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 - (void)cleanUp {
 	stopping = YES;
 	while(processEntered) {
-		usleep(1000);
+		usleep(500);
 	}
 	[self fullShutdown];
 }
@@ -323,7 +323,7 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 		@autoreleasepool {
 			AudioChunk *chunk = nil;
 			chunk = [self convert];
-			if(!chunk || ![chunk duration]) {
+			if(!chunk || ![chunk frameCount]) {
 				if([self endOfStream] == YES) {
 					break;
 				}
@@ -387,7 +387,7 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 	size_t totalFrameCount = 0;
 
 	AudioChunk *chunk = [self readAndMergeChunksAsFloat32:4096];
-	if(![chunk duration]) {
+	if(!chunk || ![chunk frameCount]) {
 		processEntered = NO;
 		return nil;
 	}
diff --git a/Audio/Chain/DSP/DSPFSurroundNode.m b/Audio/Chain/DSP/DSPFSurroundNode.m
index 00e53f007..45eb2a2b2 100644
--- a/Audio/Chain/DSP/DSPFSurroundNode.m
+++ b/Audio/Chain/DSP/DSPFSurroundNode.m
@@ -22,18 +22,17 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 @implementation DSPFSurroundNode {
 	BOOL enableFSurround;
 	BOOL FSurroundDelayRemoved;
-	BOOL resetStreamFormat;
 	FSurroundFilter *fsurround;
-	
+
 	BOOL stopping, paused;
 	BOOL processEntered;
-	
+
 	BOOL observersapplied;
-	
+
 	AudioStreamBasicDescription lastInputFormat;
 	AudioStreamBasicDescription inputFormat;
 	AudioStreamBasicDescription outputFormat;
-	
+
 	uint32_t lastInputChannelConfig, inputChannelConfig;
 	uint32_t outputChannelConfig;
 
@@ -96,7 +95,6 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 			outputChannelConfig = [fsurround channelConfig];
 
 			FSurroundDelayRemoved = NO;
-			resetStreamFormat = YES;
 		} else {
 			fsurround = nil;
 		}
@@ -118,7 +116,7 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 - (void)cleanUp {
 	stopping = YES;
 	while(processEntered) {
-		usleep(1000);
+		usleep(500);
 	}
 	[self fullShutdown];
 }
@@ -142,7 +140,7 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 		@autoreleasepool {
 			AudioChunk *chunk = nil;
 			chunk = [self convert];
-			if(!chunk || ![chunk duration]) {
+			if(!chunk || ![chunk frameCount]) {
 				if([self endOfStream] == YES) {
 					break;
 				}
@@ -204,18 +202,18 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 		return [self readChunk:4096];
 	}
 
-	size_t totalRequestedSamples = resetStreamFormat ? 2048 : 4096;
+	size_t totalRequestedSamples = 4096;
 	size_t totalFrameCount = 0;
 
 	AudioChunk *chunk = [self readAndMergeChunksAsFloat32:totalRequestedSamples];
-	if(![chunk duration]) {
+	if(!chunk || ![chunk frameCount]) {
 		processEntered = NO;
 		return nil;
 	}
 
 	double streamTimestamp = [chunk streamTimestamp];
 
-	float *samplePtr = resetStreamFormat ? &inBuffer[2048 * 2] : &inBuffer[0];
+	float *samplePtr = &inBuffer[0];
 
 	size_t frameCount = [chunk frameCount];
 	NSData *sampleData = [chunk removeSamples:frameCount];
@@ -224,12 +222,6 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 
 	totalFrameCount = frameCount;
 
-	if(resetStreamFormat) {
-		bzero(&inBuffer[0], 2048 * 2 * sizeof(float));
-		totalFrameCount += 2048;
-		resetStreamFormat = NO;
-	}
-
 	size_t countToProcess = totalFrameCount;
 	size_t samplesRendered;
 	if(countToProcess < 4096) {
@@ -237,8 +229,8 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 		countToProcess = 4096;
 	}
 
-	[fsurround process:&inBuffer[0] output:&outBuffer[4096 * 6] count:(int)countToProcess];
-	samplePtr = &outBuffer[4096 * 6];
+	[fsurround process:&inBuffer[0] output:&outBuffer[0] count:(int)countToProcess];
+	samplePtr = &outBuffer[0];
 	samplesRendered = totalFrameCount;
 
 	if(totalFrameCount < 4096) {
diff --git a/Audio/Chain/DSP/DSPHRTFNode.m b/Audio/Chain/DSP/DSPHRTFNode.m
index 644727d38..289845faf 100644
--- a/Audio/Chain/DSP/DSPHRTFNode.m
+++ b/Audio/Chain/DSP/DSPHRTFNode.m
@@ -241,7 +241,7 @@ static void unregisterMotionListener(void) {
 - (void)cleanUp {
 	stopping = YES;
 	while(processEntered) {
-		usleep(1000);
+		usleep(500);
 	}
 	[self fullShutdown];
 }
@@ -265,7 +265,7 @@ static void unregisterMotionListener(void) {
 		@autoreleasepool {
 			AudioChunk *chunk = nil;
 			chunk = [self convert];
-			if(!chunk || ![chunk duration]) {
+			if(!chunk || ![chunk frameCount]) {
 				if([self endOfStream] == YES) {
 					break;
 				}
@@ -328,7 +328,7 @@ static void unregisterMotionListener(void) {
 	}
 
 	AudioChunk *chunk = [self readChunkAsFloat32:4096];
-	if(!chunk || ![chunk duration]) {
+	if(!chunk || ![chunk frameCount]) {
 		processEntered = NO;
 		return nil;
 	}
diff --git a/Audio/Chain/DSP/DSPRubberbandNode.m b/Audio/Chain/DSP/DSPRubberbandNode.m
index 060270154..ed52f47d6 100644
--- a/Audio/Chain/DSP/DSPRubberbandNode.m
+++ b/Audio/Chain/DSP/DSPRubberbandNode.m
@@ -322,7 +322,7 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
 - (void)cleanUp {
 	stopping = YES;
 	while(processEntered) {
-		usleep(1000);
+		usleep(500);
 	}
 	[self fullShutdown];
 }
@@ -346,7 +346,7 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
 		@autoreleasepool {
 			AudioChunk *chunk = nil;
 			chunk = [self convert];
-			if(!chunk || ![chunk duration]) {
+			if(!chunk || ![chunk frameCount]) {
 				if([self endOfStream] == YES) {
 					break;
 				}
@@ -417,7 +417,7 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
 		samplesToProcess = blockSize;
 
 	AudioChunk *chunk = [self readAndMergeChunksAsFloat32:samplesToProcess];
-	if(![chunk duration]) {
+	if(!chunk || ![chunk frameCount]) {
 		processEntered = NO;
 		return nil;
 	}
diff --git a/Audio/Chain/Node.m b/Audio/Chain/Node.m
index 6807a95f8..901705e06 100644
--- a/Audio/Chain/Node.m
+++ b/Audio/Chain/Node.m
@@ -259,14 +259,14 @@
 		[[previousNode semaphore] signal];
 	}
 
+	[accessLock unlock];
+
 	AudioChunk *ret;
 	@autoreleasepool {
 		ret = [[previousNode buffer] removeAndMergeSamples:maxFrames];
 	}
 
-	[accessLock unlock];
-
 	if([ret frameCount]) {
 		[[previousNode semaphore] signal];
 	}
 
@@ -294,14 +294,14 @@
 		[[previousNode semaphore] signal];
 	}
 
+	[accessLock unlock];
+
 	AudioChunk *ret;
 	@autoreleasepool {
 		ret = [[previousNode buffer] removeAndMergeSamplesAsFloat32:maxFrames];
 	}
 
-	[accessLock unlock];
-
 	if([ret frameCount]) {
 		[[previousNode semaphore] signal];
 	}
 
diff --git a/Audio/Chain/VisualizationNode.m b/Audio/Chain/VisualizationNode.m
index 4b41250df..26b8264e7 100644
--- a/Audio/Chain/VisualizationNode.m
+++ b/Audio/Chain/VisualizationNode.m
@@ -233,7 +233,7 @@ static VisualizationCollection *theCollection = nil;
 		}
 		AudioChunk *chunk = nil;
 		chunk = [self readAndMergeChunksAsFloat32:512];
-		if(!chunk || ![chunk duration]) {
+		if(!chunk || ![chunk frameCount]) {
 			if([self endOfStream] == YES) {
 				break;
 			}