diff --git a/Audio/Chain/ChunkList.m b/Audio/Chain/ChunkList.m
index 0b7c854bb..ce9f91217 100644
--- a/Audio/Chain/ChunkList.m
+++ b/Audio/Chain/ChunkList.m
@@ -49,7 +49,7 @@
 }
 
 - (BOOL)isFull {
-    return listDuration >= maxDuration;
+    return (maxDuration - listDuration) < 0.01;
 }
 
 - (void)addChunk:(AudioChunk *)chunk {
diff --git a/Audio/Chain/ConverterNode.m b/Audio/Chain/ConverterNode.m
index 21c825deb..ab738c030 100644
--- a/Audio/Chain/ConverterNode.m
+++ b/Audio/Chain/ConverterNode.m
@@ -422,7 +422,10 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
     // when the end of stream is reached. Convert function instead processes what it can,
     // and returns 0 samples when it has nothing more to process at the end of stream.
     while([self shouldContinue] == YES) {
-        int amountConverted = [self convert:writeBuf amount:CHUNK_SIZE];
+        int amountConverted;
+        @autoreleasepool {
+            amountConverted = [self convert:writeBuf amount:CHUNK_SIZE];
+        }
         if(!amountConverted) {
             if(paused) {
                 while(paused)
@@ -985,13 +988,15 @@ static float db_to_scale(float db) {
     [refillNode setChannelConfig:previousOutputConfig];
 
     for(;;) {
-        AudioChunk *chunk = [buffer removeSamples:16384];
-        size_t frameCount = [chunk frameCount];
-        if(frameCount) {
-            NSData *samples = [chunk removeSamples:frameCount];
-            [refillNode writeData:[samples bytes] amount:frameCount];
-        } else
-            break;
+        @autoreleasepool {
+            AudioChunk *chunk = [buffer removeSamples:16384];
+            size_t frameCount = [chunk frameCount];
+            if(frameCount) {
+                NSData *samples = [chunk removeSamples:frameCount];
+                [refillNode writeData:[samples bytes] amount:frameCount];
+            } else
+                break;
+        }
     }
 
     [self setupWithInputFormat:previousOutputFormat withInputConfig:[AudioChunk guessChannelConfig:previousOutputFormat.mChannelsPerFrame] outputFormat:outputFormat outputConfig:outputChannelConfig isLossless:rememberedLossless];
diff --git a/Audio/Chain/Node.m b/Audio/Chain/Node.m
index 5271c68a7..0eb554135 100644
--- a/Audio/Chain/Node.m
+++ b/Audio/Chain/Node.m
@@ -105,7 +105,9 @@
     }
 
     if([previousNode shouldReset] == YES) {
-        [buffer reset];
+        @autoreleasepool {
+            [buffer reset];
+        }
 
         shouldReset = YES;
         [previousNode setShouldReset:NO];
@@ -113,7 +115,11 @@
         [[previousNode semaphore] signal];
     }
 
-    AudioChunk *ret = [[previousNode buffer] removeSamples:maxFrames];
+    AudioChunk *ret;
+
+    @autoreleasepool {
+        ret = [[previousNode buffer] removeSamples:maxFrames];
+    }
 
     [accessLock unlock];
 
@@ -151,9 +157,11 @@
 - (void)resetBuffer {
     shouldReset = YES; // Will reset on next write.
     if(previousNode == nil) {
-        [accessLock lock];
-        [buffer reset];
-        [accessLock unlock];
+        @autoreleasepool {
+            [accessLock lock];
+            [buffer reset];
+            [accessLock unlock];
+        }
     }
 }
 
diff --git a/Audio/Output/OutputCoreAudio.m b/Audio/Output/OutputCoreAudio.m
index b67d25733..637d284bf 100644
--- a/Audio/Output/OutputCoreAudio.m
+++ b/Audio/Output/OutputCoreAudio.m
@@ -48,108 +48,110 @@ static void scaleBuffersByVolume(AudioBufferList *ioData, float volume) {
 }
 
 static OSStatus renderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags, const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames, AudioBufferList *ioData) {
-    OutputCoreAudio *_self = (__bridge OutputCoreAudio *)inRefCon;
+    @autoreleasepool {
+        OutputCoreAudio *_self = (__bridge OutputCoreAudio *)inRefCon;
 
-    const int channels = _self->deviceFormat.mChannelsPerFrame;
-    const int bytesPerPacket = channels * sizeof(float);
+        const int channels = _self->deviceFormat.mChannelsPerFrame;
+        const int bytesPerPacket = channels * sizeof(float);
 
-    size_t amountToRead, amountRead = 0;
+        size_t amountToRead, amountRead = 0;
 
-    amountToRead = inNumberFrames * bytesPerPacket;
+        amountToRead = inNumberFrames * bytesPerPacket;
 
-    if(_self->stopping == YES || [_self->outputController shouldContinue] == NO) {
-        // Chain is dead, fill out the serial number pointer forever with silence
-        clearBuffers(ioData, amountToRead / bytesPerPacket, 0);
-        atomic_fetch_add(&_self->bytesRendered, amountToRead);
-        _self->stopping = YES;
-        return 0;
-    }
-
-    if([[_self->outputController buffer] isEmpty] && ![_self->outputController chainQueueHasTracks]) {
-        // Hit end of last track, pad with silence until queue event stops us
-        clearBuffers(ioData, amountToRead / bytesPerPacket, 0);
-        atomic_fetch_add(&_self->bytesRendered, amountToRead);
-        return 0;
-    }
-
-    AudioChunk *chunk = [[_self->outputController buffer] removeSamples:(amountToRead / bytesPerPacket)];
-
-    size_t frameCount = [chunk frameCount];
-    AudioStreamBasicDescription format = [chunk format];
-    uint32_t config = [chunk channelConfig];
-
-    if(frameCount) {
-        if(!_self->streamFormatStarted || config != _self->streamChannelConfig || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
-            _self->streamFormat = format;
-            _self->streamChannelConfig = config;
-            _self->streamFormatStarted = YES;
-            _self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format inputConfig:config andOutputFormat:_self->deviceFormat outputConfig:_self->deviceChannelConfig];
+        if(_self->stopping == YES || [_self->outputController shouldContinue] == NO) {
+            // Chain is dead, fill out the serial number pointer forever with silence
+            clearBuffers(ioData, amountToRead / bytesPerPacket, 0);
+            atomic_fetch_add(&_self->bytesRendered, amountToRead);
+            _self->stopping = YES;
+            return 0;
         }
 
-        double chunkDuration = [chunk duration];
+        if([[_self->outputController buffer] isEmpty] && ![_self->outputController chainQueueHasTracks]) {
+            // Hit end of last track, pad with silence until queue event stops us
+            clearBuffers(ioData, amountToRead / bytesPerPacket, 0);
+            atomic_fetch_add(&_self->bytesRendered, amountToRead);
+            return 0;
+        }
 
-        NSData *samples = [chunk removeSamples:frameCount];
+        AudioChunk *chunk = [[_self->outputController buffer] removeSamples:(amountToRead / bytesPerPacket)];
 
-        float downmixedData[frameCount * channels];
-        [_self->downmixer process:[samples bytes] frameCount:frameCount output:downmixedData];
+        size_t frameCount = [chunk frameCount];
+        AudioStreamBasicDescription format = [chunk format];
+        uint32_t config = [chunk channelConfig];
 
-        fillBuffers(ioData, downmixedData, frameCount, 0);
-        amountRead = frameCount * bytesPerPacket;
-        [_self->outputController incrementAmountPlayed:chunkDuration];
-        atomic_fetch_add(&_self->bytesRendered, amountRead);
-        [_self->writeSemaphore signal];
-    }
-
-    // Try repeatedly! Buffer wraps can cause a slight data shortage, as can
-    // unexpected track changes.
-    while((amountRead < amountToRead) && [_self->outputController shouldContinue] == YES) {
-        chunk = [[_self->outputController buffer] removeSamples:((amountToRead - amountRead) / bytesPerPacket)];
-        frameCount = [chunk frameCount];
-        format = [chunk format];
-        config = [chunk channelConfig];
         if(frameCount) {
             if(!_self->streamFormatStarted || config != _self->streamChannelConfig || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
                 _self->streamFormat = format;
+                _self->streamChannelConfig = config;
                 _self->streamFormatStarted = YES;
                 _self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format inputConfig:config andOutputFormat:_self->deviceFormat outputConfig:_self->deviceChannelConfig];
             }
-            atomic_fetch_add(&_self->bytesRendered, frameCount * bytesPerPacket);
+            double chunkDuration = [chunk duration];
+            NSData *samples = [chunk removeSamples:frameCount];
+            float downmixedData[frameCount * channels];
             [_self->downmixer process:[samples bytes] frameCount:frameCount output:downmixedData];
-            fillBuffers(ioData, downmixedData, frameCount, amountRead / bytesPerPacket);
+            fillBuffers(ioData, downmixedData, frameCount, 0);
+            amountRead = frameCount * bytesPerPacket;
             [_self->outputController incrementAmountPlayed:chunkDuration];
-
-            amountRead += frameCount * bytesPerPacket;
+            atomic_fetch_add(&_self->bytesRendered, amountRead);
             [_self->writeSemaphore signal];
-        } else {
-            [_self->readSemaphore timedWait:500];
         }
-    float volumeScale = 1.0;
-    long sustained = atomic_load_explicit(&_self->bytesHdcdSustained, memory_order_relaxed);
-    if(sustained) {
-        if(sustained < amountRead) {
-            atomic_store(&_self->bytesHdcdSustained, 0);
-        } else {
-            atomic_fetch_sub(&_self->bytesHdcdSustained, amountRead);
+        // Try repeatedly! Buffer wraps can cause a slight data shortage, as can
+        // unexpected track changes.
+        while((amountRead < amountToRead) && [_self->outputController shouldContinue] == YES) {
+            chunk = [[_self->outputController buffer] removeSamples:((amountToRead - amountRead) / bytesPerPacket)];
+            frameCount = [chunk frameCount];
+            format = [chunk format];
+            config = [chunk channelConfig];
+            if(frameCount) {
+                if(!_self->streamFormatStarted || config != _self->streamChannelConfig || memcmp(&_self->streamFormat, &format, sizeof(format)) != 0) {
+                    _self->streamFormat = format;
+                    _self->streamFormatStarted = YES;
+                    _self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:format inputConfig:config andOutputFormat:_self->deviceFormat outputConfig:_self->deviceChannelConfig];
+                }
+                atomic_fetch_add(&_self->bytesRendered, frameCount * bytesPerPacket);
+                double chunkDuration = [chunk duration];
+                NSData *samples = [chunk removeSamples:frameCount];
+                float downmixedData[frameCount * channels];
+                [_self->downmixer process:[samples bytes] frameCount:frameCount output:downmixedData];
+                fillBuffers(ioData, downmixedData, frameCount, amountRead / bytesPerPacket);
+
+                [_self->outputController incrementAmountPlayed:chunkDuration];
+
+                amountRead += frameCount * bytesPerPacket;
+                [_self->writeSemaphore signal];
+            } else {
+                [_self->readSemaphore timedWait:500];
+            }
         }
-        volumeScale = 0.5;
+
+        float volumeScale = 1.0;
+        long sustained = atomic_load_explicit(&_self->bytesHdcdSustained, memory_order_relaxed);
+        if(sustained) {
+            if(sustained < amountRead) {
+                atomic_store(&_self->bytesHdcdSustained, 0);
+            } else {
+                atomic_fetch_sub(&_self->bytesHdcdSustained, amountRead);
+            }
+            volumeScale = 0.5;
+        }
+
+        scaleBuffersByVolume(ioData, _self->volume * volumeScale);
+
+        if(amountRead < amountToRead) {
+            // Either underrun, or no data at all. Caller output tends to just
+            // buffer loop if it doesn't get anything, so always produce a full
+            // buffer, and silence anything we couldn't supply.
+            clearBuffers(ioData, (amountToRead - amountRead) / bytesPerPacket, amountRead / bytesPerPacket);
+        }
+
+        return 0;
+    }
-
-    scaleBuffersByVolume(ioData, _self->volume * volumeScale);
-
-    if(amountRead < amountToRead) {
-        // Either underrun, or no data at all. Caller output tends to just
-        // buffer loop if it doesn't get anything, so always produce a full
-        // buffer, and silence anything we couldn't supply.
-        clearBuffers(ioData, (amountToRead - amountRead) / bytesPerPacket, amountRead / bytesPerPacket);
-    }
-
-    return 0;
 };
 
 - (id)initWithController:(OutputNode *)c {
@@ -220,7 +222,9 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
     }
 
     if([outputController shouldReset]) {
-        [[outputController buffer] reset];
+        @autoreleasepool {
+            [[outputController buffer] reset];
+        }
         [outputController setShouldReset:NO];
         [delayedEvents removeAllObjects];
         delayedEventsPopped = YES;
@@ -244,10 +248,12 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
             size_t frameCount = 0;
 
             if(![[outputController buffer] isFull]) {
-                AudioChunk *chunk = [outputController readChunk:512];
-                frameCount = [chunk frameCount];
-                if(frameCount) {
-                    [[outputController buffer] addChunk:chunk];
+                @autoreleasepool {
+                    AudioChunk *chunk = [outputController readChunk:512];
+                    frameCount = [chunk frameCount];
+                    if(frameCount) {
+                        [[outputController buffer] addChunk:chunk];
+                    }
                 }
             }
 
@@ -467,7 +473,9 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
     AVAudioFormat *renderFormat;
 
     [outputController incrementAmountPlayed:[[outputController buffer] listDuration]];
-    [[outputController buffer] reset];
+    @autoreleasepool {
+        [[outputController buffer] reset];
+    }
 
     _deviceFormat = format;
    deviceFormat = *(format.streamDescription);
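
A note on the ChunkList change: `isFull` now reports full once less than 10 ms of headroom remains, rather than requiring `listDuration` to reach `maxDuration` exactly. Since `listDuration` is a running sum of many short chunk durations, the strict `>=` comparison could presumably stay false indefinitely when rounding leaves the total just under the limit. The sketch below only illustrates that idea; `DurationBuffer` and its methods are hypothetical stand-ins, not code from this patch.

```objc
#import <Foundation/Foundation.h>

// Hypothetical stand-in for ChunkList: tracks buffered audio by duration.
@interface DurationBuffer : NSObject {
    double listDuration; // seconds currently buffered
    double maxDuration;  // desired capacity in seconds
}
- (instancetype)initWithMaximumDuration:(double)duration;
- (void)addDuration:(double)seconds;
- (BOOL)isFull;
@end

@implementation DurationBuffer
- (instancetype)initWithMaximumDuration:(double)duration {
    if((self = [super init])) {
        maxDuration = duration;
    }
    return self;
}

- (void)addDuration:(double)seconds {
    listDuration += seconds;
}

// A strict `listDuration >= maxDuration` can stay false when the accumulated
// sum rounds to just below the target; treating anything within 10 ms of
// capacity as full sidesteps that edge case.
- (BOOL)isFull {
    return (maxDuration - listDuration) < 0.01;
}
@end

int main(void) {
    @autoreleasepool {
        DurationBuffer *buffer = [[DurationBuffer alloc] initWithMaximumDuration:3.0];
        while(![buffer isFull]) {
            [buffer addDuration:1024.0 / 44100.0]; // ~23 ms chunks
        }
        NSLog(@"buffer considered full");
    }
    return 0;
}
```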
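The ConverterNode, Node, and OutputCoreAudio hunks all apply one pattern: per-chunk work inside a long-running loop (the convert loop, the refill loop, `readChunk:`, the output feeder that calls `readChunk:512`) gains its own `@autoreleasepool`, so temporaries that end up autoreleased while a chunk is handled (for example the `NSData` returned by `removeSamples:`) are freed at the end of that iteration instead of accumulating until the thread's outermost pool drains. A minimal sketch of the pattern, using a hypothetical `drainChunks` helper rather than the patch's classes:

```objc
#import <Foundation/Foundation.h>

// Hypothetical producer/consumer pair standing in for the audio chain's
// buffer-draining loops. The point is the placement of @autoreleasepool:
// one pool scope per iteration.
static void drainChunks(NSMutableArray<NSData *> *pending, NSMutableData *sink) {
    for(;;) {
        @autoreleasepool {
            NSData *chunk = [pending firstObject];
            if(!chunk) {
                break; // leaving the pool via break still drains it
            }
            [pending removeObjectAtIndex:0];

            // Autoreleased temporaries created while handling this chunk are
            // released when the pool scope closes, keeping peak memory bounded
            // by one chunk rather than the whole stream.
            NSData *slice = [chunk subdataWithRange:NSMakeRange(0, chunk.length)];
            [sink appendData:slice];
        }
    }
}

int main(void) {
    @autoreleasepool {
        NSMutableArray<NSData *> *pending = [NSMutableArray array];
        for(int i = 0; i < 16; i++) {
            [pending addObject:[NSMutableData dataWithLength:4096]];
        }
        NSMutableData *sink = [NSMutableData data];
        drainChunks(pending, sink);
        NSLog(@"drained %lu bytes", (unsigned long)sink.length);
    }
    return 0;
}
```

The placement is the design point: a pool per iteration bounds the lifetime of temporaries to one chunk, whereas a single pool around the whole loop would let them pile up for as long as the loop runs.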
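The largest hunk re-indents the entire body of `renderCallback` inside one `@autoreleasepool`. Core Audio drives render callbacks from its own real-time I/O thread, which does not set up an autorelease pool for the callback, so Objective-C temporaries produced while pulling chunks, downmixing, and signalling semaphores would otherwise outlive each render cycle. The skeleton below shows only the shape of that change; the callback name and the silence-fill body are placeholders, not the patch's code.

```objc
#import <AudioToolbox/AudioToolbox.h>
#include <string.h>

// Skeleton of an AudioUnit render callback wrapped in an autorelease pool.
// The loop body is a placeholder for the real work (pulling chunks from the
// output buffer, downmixing, volume scaling).
static OSStatus exampleRenderCallback(void *inRefCon,
                                      AudioUnitRenderActionFlags *ioActionFlags,
                                      const AudioTimeStamp *inTimeStamp,
                                      UInt32 inBusNumber,
                                      UInt32 inNumberFrames,
                                      AudioBufferList *ioData) {
    @autoreleasepool {
        // Objective-C calls made here may autorelease temporaries; the pool
        // guarantees they are released once per render cycle rather than
        // piling up on the real-time thread.
        for(UInt32 i = 0; i < ioData->mNumberBuffers; i++) {
            memset(ioData->mBuffers[i].mData, 0, ioData->mBuffers[i].mDataByteSize);
        }
    } // pool drains here, every callback
    return noErr;
}
```

An alternative design would keep Objective-C objects off the render thread altogether; this patch keeps the existing pull model and simply guarantees that whatever gets autoreleased there is drained once per callback.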