From 477feaab1d4060ddab4c3325dbd19c3e9545d578 Mon Sep 17 00:00:00 2001 From: Christopher Snowhill Date: Mon, 7 Feb 2022 19:18:45 -0800 Subject: [PATCH] Now properly supports sample format changes The sample format can now change dynamically at play time, and the player will resample as necessary, extrapolating at the edges between format changes to reduce the potential for gaps. Currently supported formats for this: - FLAC - Ogg Vorbis - Any format supported by FFmpeg, such as MP3 or AAC Signed-off-by: Christopher Snowhill --- Audio/Chain/BufferChain.h | 2 - Audio/Chain/BufferChain.m | 10 ++-- Audio/Chain/ChunkList.h | 3 ++ Audio/Chain/ChunkList.m | 16 ++++++- Audio/Chain/ConverterNode.h | 6 +++ Audio/Chain/ConverterNode.m | 58 +++++++++++++++++++--- Audio/Chain/InputNode.m | 24 +++++++--- Audio/Chain/Node.h | 18 +++---- Audio/Chain/Node.m | 10 ++++ Audio/CogPluginMulti.h | 1 - Audio/CogPluginMulti.m | 72 ++++++++++++++-------------- Audio/Output/OutputCoreAudio.m | 2 + Plugins/CoreAudio/CoreAudioDecoder.m | 2 +- Plugins/CueSheet/CueSheetDecoder.h | 1 + Plugins/CueSheet/CueSheetDecoder.m | 35 ++++++++++++++ Plugins/FFMPEG/FFMPEGDecoder.m | 16 ++++++- Plugins/Flac/FlacDecoder.h | 1 + Plugins/Flac/FlacDecoder.m | 27 ++++++++++- Plugins/WavPack/WavPackDecoder.m | 2 +- 19 files changed, 231 insertions(+), 75 deletions(-) diff --git a/Audio/Chain/BufferChain.h b/Audio/Chain/BufferChain.h index 2aa09ecff..392c6f1a2 100644 --- a/Audio/Chain/BufferChain.h +++ b/Audio/Chain/BufferChain.h @@ -66,8 +66,6 @@ - (BOOL)endOfInputReached; - (BOOL)setTrack:(NSURL *)track; -- (void)inputFormatDidChange:(AudioStreamBasicDescription)format inputConfig:(uint32_t)inputConfig; - - (BOOL)isRunning; - (id)controller; diff --git a/Audio/Chain/BufferChain.m b/Audio/Chain/BufferChain.m index 579d43f76..282f38ae4 100644 --- a/Audio/Chain/BufferChain.m +++ b/Audio/Chain/BufferChain.m @@ -62,7 +62,7 @@ inputFormat = [inputNode nodeFormat]; if([properties valueForKey:@"channelConfig"]) - inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue]; + inputChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue]; outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame; outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame; @@ -91,7 +91,7 @@ inputFormat = [inputNode nodeFormat]; if([properties valueForKey:@"channelConfig"]) - inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue]; + inputChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue]; outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame; outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame; @@ -125,7 +125,7 @@ inputFormat = [inputNode nodeFormat]; if([properties valueForKey:@"channelConfig"]) - inputChannelConfig = [[properties valueForKey:@"channelConfig"] intValue]; + inputChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue]; outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame; outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame; @@ -193,10 +193,6 @@ [controller launchOutputThread]; } -- (void)inputFormatDidChange:(AudioStreamBasicDescription)format inputConfig:(uint32_t)inputConfig { - DLog(@"FORMAT DID CHANGE!"); -} - - (InputNode *)inputNode { return inputNode; } diff --git a/Audio/Chain/ChunkList.h b/Audio/Chain/ChunkList.h index 3bcd004a3..6768e0bb0 100644 --- a/Audio/Chain/ChunkList.h +++ 
b/Audio/Chain/ChunkList.h @@ -21,6 +21,7 @@ NS_ASSUME_NONNULL_BEGIN BOOL inAdder; BOOL inRemover; + BOOL inPeeker; BOOL stopping; } @@ -37,6 +38,8 @@ NS_ASSUME_NONNULL_BEGIN - (void)addChunk:(AudioChunk *)chunk; - (AudioChunk *)removeSamples:(size_t)maxFrameCount; +- (BOOL)peekFormat:(nonnull AudioStreamBasicDescription *)format channelConfig:(nonnull uint32_t *)config; + @end NS_ASSUME_NONNULL_END diff --git a/Audio/Chain/ChunkList.m b/Audio/Chain/ChunkList.m index 63288817e..475f23fbe 100644 --- a/Audio/Chain/ChunkList.m +++ b/Audio/Chain/ChunkList.m @@ -22,6 +22,7 @@ inAdder = NO; inRemover = NO; + inPeeker = NO; stopping = NO; } @@ -30,7 +31,7 @@ - (void)dealloc { stopping = YES; - while(inAdder || inRemover) { + while(inAdder || inRemover || inPeeker) { usleep(500); } } @@ -96,4 +97,17 @@ } } +- (BOOL)peekFormat:(AudioStreamBasicDescription *)format channelConfig:(uint32_t *)config { + if(stopping) return NO; + @synchronized(chunkList) { + if([chunkList count]) { + AudioChunk *chunk = [chunkList objectAtIndex:0]; + *format = [chunk format]; + *config = [chunk channelConfig]; + return YES; + } + } + return NO; +} + @end diff --git a/Audio/Chain/ConverterNode.h b/Audio/Chain/ConverterNode.h index f8ed72758..8e0dfee6d 100644 --- a/Audio/Chain/ConverterNode.h +++ b/Audio/Chain/ConverterNode.h @@ -71,6 +71,12 @@ uint32_t inputChannelConfig; uint32_t outputChannelConfig; + BOOL streamFormatChanged; + AudioStreamBasicDescription newInputFormat; + uint32_t newInputChannelConfig; + + AudioChunk *lastChunkIn; + AudioStreamBasicDescription previousOutputFormat; uint32_t previousOutputConfig; AudioStreamBasicDescription rememberedInputFormat; diff --git a/Audio/Chain/ConverterNode.m b/Audio/Chain/ConverterNode.m index ab738c030..2cda5986f 100644 --- a/Audio/Chain/ConverterNode.m +++ b/Audio/Chain/ConverterNode.m @@ -440,6 +440,10 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes [self cleanUp]; [self setupWithInputFormat:rememberedInputFormat withInputConfig:rememberedInputConfig outputFormat:outputFormat outputConfig:outputChannelConfig isLossless:rememberedLossless]; continue; + } else if(streamFormatChanged) { + [self cleanUp]; + [self setupWithInputFormat:newInputFormat withInputConfig:newInputChannelConfig outputFormat:outputFormat outputConfig:outputChannelConfig isLossless:rememberedLossless]; + continue; } else break; } @@ -452,6 +456,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes int amountReadFromFC; int amountRead = 0; int amountToSkip; + int amountToIgnorePostExtrapolated = 0; if(stopping) return 0; @@ -498,14 +503,36 @@ tryagain: ssize_t bytesReadFromInput = 0; - while(bytesReadFromInput < amountToWrite && !stopping && [self shouldContinue] == YES && [self endOfStream] == NO) { + while(bytesReadFromInput < amountToWrite && !stopping && !streamFormatChanged && [self shouldContinue] == YES && [self endOfStream] == NO) { + AudioStreamBasicDescription inf; + uint32_t config; + if([self peekFormat:&inf channelConfig:&config]) { + if(config != inputChannelConfig || memcmp(&inf, &inputFormat, sizeof(inf)) != 0) { + if(inputChannelConfig == 0 && memcmp(&inf, &inputFormat, sizeof(inf)) == 0) { + inputChannelConfig = config; + continue; + } else { + newInputFormat = inf; + newInputChannelConfig = config; + streamFormatChanged = YES; + break; + } + } + } + AudioChunk *chunk = [self readChunk:((amountToWrite - bytesReadFromInput) / inputFormat.mBytesPerPacket)]; - AudioStreamBasicDescription inf = [chunk format]; + inf = 
[chunk format]; size_t frameCount = [chunk frameCount]; + config = [chunk channelConfig]; size_t bytesRead = frameCount * inf.mBytesPerPacket; if(frameCount) { NSData *samples = [chunk removeSamples:frameCount]; memcpy(inputBuffer + bytesReadFromInput + amountToSkip, [samples bytes], bytesRead); + lastChunkIn = [[AudioChunk alloc] init]; + [lastChunkIn setFormat:inf]; + [lastChunkIn setChannelConfig:config]; + [lastChunkIn setLossless:[chunk lossless]]; + [lastChunkIn assignSamples:[samples bytes] frameCount:frameCount]; } bytesReadFromInput += bytesRead; if(!frameCount) { @@ -518,7 +545,7 @@ tryagain: // Pad end of track with input format silence - if(stopping || [self shouldContinue] == NO || [self endOfStream] == YES) { + if(stopping || streamFormatChanged || [self shouldContinue] == NO || [self endOfStream] == YES) { if(!skipResampler && !is_postextrapolated_) { if(dsd2pcm) { amountToSkip = dsd2pcmLatency * inputFormat.mBytesPerPacket; @@ -532,6 +559,24 @@ tryagain: } } + size_t bitsPerSample = inputFormat.mBitsPerChannel; + BOOL isBigEndian = !!(inputFormat.mFormatFlags & kAudioFormatFlagIsBigEndian); + + if(!bytesReadFromInput && streamFormatChanged && !skipResampler && is_postextrapolated_ < 2) { + AudioChunk *chunk = lastChunkIn; + lastChunkIn = nil; + AudioStreamBasicDescription inf = [chunk format]; + size_t frameCount = [chunk frameCount]; + size_t bytesRead = frameCount * inf.mBytesPerPacket; + if(frameCount) { + amountToIgnorePostExtrapolated = (int)frameCount; + NSData *samples = [chunk removeSamples:frameCount]; + memcpy(inputBuffer, [samples bytes], bytesRead); + } + bytesReadFromInput += bytesRead; + amountToSkip = 0; + } + if(!bytesReadFromInput) { convertEntered = NO; return amountRead; @@ -544,8 +589,7 @@ tryagain: dsdLatencyEaten = (int)ceil(dsd2pcmLatency * sampleRatio); } - if(bytesReadFromInput && - (inputFormat.mFormatFlags & kAudioFormatFlagIsBigEndian)) { + if(bytesReadFromInput && isBigEndian) { // Time for endian swap! 
convert_be_to_le(inputBuffer, inputFormat.mBitsPerChannel, bytesReadFromInput); } @@ -560,7 +604,6 @@ tryagain: if(bytesReadFromInput && !isFloat) { float gain = 1.0; - size_t bitsPerSample = inputFormat.mBitsPerChannel; if(bitsPerSample == 1) { samplesRead = bytesReadFromInput / inputFormat.mBytesPerPacket; convert_dsd_to_f32(inputBuffer + bytesReadFromInput, inputBuffer, samplesRead, inputFormat.mChannelsPerFrame, dsd2pcm); @@ -699,7 +742,7 @@ tryagain: // Input now contains bytesReadFromInput worth of floats, in the input sample rate inpSize = bytesReadFromInput; - inpOffset = 0; + inpOffset = amountToIgnorePostExtrapolated * floatFormat.mBytesPerPacket; } if(inpOffset != inpSize && floatOffset == floatSize) { @@ -948,6 +991,7 @@ static float db_to_scale(float db) { convertEntered = NO; paused = NO; outputFormatChanged = NO; + streamFormatChanged = NO; return YES; } diff --git a/Audio/Chain/InputNode.m b/Audio/Chain/InputNode.m index b342f0703..6798b641c 100644 --- a/Audio/Chain/InputNode.m +++ b/Audio/Chain/InputNode.m @@ -48,7 +48,7 @@ nodeFormat = propertiesToASBD(properties); if([properties valueForKey:@"channelConfig"]) - nodeChannelConfig = [[properties valueForKey:@"channelConfig"] intValue]; + nodeChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue]; nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]; shouldContinue = YES; @@ -69,7 +69,7 @@ nodeFormat = propertiesToASBD(properties); if([properties valueForKey:@"channelConfig"]) - nodeChannelConfig = [[properties valueForKey:@"channelConfig"] intValue]; + nodeChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue]; nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]; [self registerObservers]; @@ -105,7 +105,14 @@ DLog(@"Input format changed"); // Converter may need resetting, it'll do that when it reaches the new chunks NSDictionary *properties = [decoder properties]; + + int bitsPerSample = [[properties objectForKey:@"bitsPerSample"] intValue]; + int channels = [[properties objectForKey:@"channels"] intValue]; + + bytesPerFrame = ((bitsPerSample + 7) / 8) * channels; + nodeFormat = propertiesToASBD(properties); + nodeChannelConfig = [[properties valueForKey:@"channelConfig"] unsignedIntValue]; nodeLossless = [[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]; } else if([keyPath isEqual:@"metadata"]) { // Inform something of metadata change @@ -114,7 +121,8 @@ - (void)process { int amountInBuffer = 0; - void *inputBuffer = malloc(CHUNK_SIZE); + int bytesInBuffer = 0; + void *inputBuffer = malloc(CHUNK_SIZE * 18); // Maximum 18 channels, dunno what we'll receive BOOL shouldClose = YES; BOOL seekError = NO; @@ -142,13 +150,15 @@ } if(amountInBuffer < CHUNK_SIZE) { - int framesToRead = (CHUNK_SIZE - amountInBuffer) / bytesPerFrame; - int framesRead = [decoder readAudio:((char *)inputBuffer) + amountInBuffer frames:framesToRead]; + int framesToRead = CHUNK_SIZE - amountInBuffer; + int framesRead = [decoder readAudio:((char *)inputBuffer) + bytesInBuffer frames:framesToRead]; if(framesRead > 0 && !seekError) { - amountInBuffer += (framesRead * bytesPerFrame); - [self writeData:inputBuffer amount:amountInBuffer]; + amountInBuffer += framesRead; + bytesInBuffer += framesRead * bytesPerFrame; + [self writeData:inputBuffer amount:bytesInBuffer]; amountInBuffer = 0; + bytesInBuffer = 0; } else { if(initialBufferFilled == NO) { [controller initialBufferFilled:self]; diff --git a/Audio/Chain/Node.h b/Audio/Chain/Node.h 
index 5a284dd9a..03ea57cff 100644 --- a/Audio/Chain/Node.h +++ b/Audio/Chain/Node.h @@ -32,33 +32,35 @@ uint32_t nodeChannelConfig; BOOL nodeLossless; } -- (id)initWithController:(id)c previous:(id)p; +- (id _Nullable)initWithController:(id _Nonnull)c previous:(id _Nullable)p; -- (void)writeData:(const void *)ptr amount:(size_t)a; -- (AudioChunk *)readChunk:(size_t)maxFrames; +- (void)writeData:(const void *_Nonnull)ptr amount:(size_t)a; +- (AudioChunk *_Nonnull)readChunk:(size_t)maxFrames; + +- (BOOL)peekFormat:(AudioStreamBasicDescription *_Nonnull)format channelConfig:(uint32_t *_Nonnull)config; - (void)process; // Should be overwriten by subclass -- (void)threadEntry:(id)arg; +- (void)threadEntry:(id _Nullable)arg; - (void)launchThread; - (void)setShouldReset:(BOOL)s; - (BOOL)shouldReset; -- (void)setPreviousNode:(id)p; -- (id)previousNode; +- (void)setPreviousNode:(id _Nullable)p; +- (id _Nullable)previousNode; - (BOOL)shouldContinue; - (void)setShouldContinue:(BOOL)s; -- (ChunkList *)buffer; +- (ChunkList *_Nonnull)buffer; - (void)resetBuffer; // WARNING! DANGER WILL ROBINSON! - (AudioStreamBasicDescription)nodeFormat; - (uint32_t)nodeChannelConfig; - (BOOL)nodeLossless; -- (Semaphore *)semaphore; +- (Semaphore *_Nonnull)semaphore; //-(void)resetBuffer; diff --git a/Audio/Chain/Node.m b/Audio/Chain/Node.m index 0eb554135..fe9d877a5 100644 --- a/Audio/Chain/Node.m +++ b/Audio/Chain/Node.m @@ -95,6 +95,16 @@ } } +- (BOOL)peekFormat:(nonnull AudioStreamBasicDescription *)format channelConfig:(nonnull uint32_t *)config { + [accessLock lock]; + + BOOL ret = [[previousNode buffer] peekFormat:format channelConfig:config]; + + [accessLock unlock]; + + return ret; +} + - (AudioChunk *)readChunk:(size_t)maxFrames { [accessLock lock]; diff --git a/Audio/CogPluginMulti.h b/Audio/CogPluginMulti.h index e376ab994..6eac16c7e 100644 --- a/Audio/CogPluginMulti.h +++ b/Audio/CogPluginMulti.h @@ -12,7 +12,6 @@ @interface CogDecoderMulti : NSObject { NSArray *theDecoders; id theDecoder; - NSMutableArray *cachedObservers; } - (id)initWithDecoders:(NSArray *)decoders; diff --git a/Audio/CogPluginMulti.m b/Audio/CogPluginMulti.m index d6f28b019..703a8771f 100644 --- a/Audio/CogPluginMulti.m +++ b/Audio/CogPluginMulti.m @@ -31,6 +31,15 @@ NSArray *sortClassesByPriority(NSArray *theClasses) { return sortedClasses; } +@interface CogDecoderMulti (Private) +- (void)registerObservers; +- (void)removeObservers; +- (void)observeValueForKeyPath:(NSString *)keyPath + ofObject:(id)object + change:(NSDictionary *)change + context:(void *)context; +@end + @implementation CogDecoderMulti + (NSArray *)mimeTypes { @@ -54,7 +63,6 @@ NSArray *sortClassesByPriority(NSArray *theClasses) { if(self) { theDecoders = sortClassesByPriority(decoders); theDecoder = nil; - cachedObservers = [[NSMutableArray alloc] init]; } return self; } @@ -73,17 +81,10 @@ NSArray *sortClassesByPriority(NSArray *theClasses) { for(NSString *classString in theDecoders) { Class decoder = NSClassFromString(classString); theDecoder = [[decoder alloc] init]; - for(NSDictionary *obsItem in cachedObservers) { - [theDecoder addObserver:[obsItem objectForKey:@"observer"] - forKeyPath:[obsItem objectForKey:@"keyPath"] - options:[[obsItem objectForKey:@"options"] unsignedIntegerValue] - context:(__bridge void *)([obsItem objectForKey:@"context"])]; - } + [self registerObservers]; if([theDecoder open:source]) return YES; - for(NSDictionary *obsItem in cachedObservers) { - [theDecoder removeObserver:[obsItem objectForKey:@"observer"] forKeyPath:[obsItem 
objectForKey:@"keyPath"]]; - } + [self removeObservers]; if([source seekable]) [source seek:0 whence:SEEK_SET]; } @@ -98,43 +99,40 @@ NSArray *sortClassesByPriority(NSArray *theClasses) { - (void)close { if(theDecoder != nil) { - for(NSDictionary *obsItem in cachedObservers) { - [theDecoder removeObserver:[obsItem objectForKey:@"observer"] forKeyPath:[obsItem objectForKey:@"keyPath"]]; - } - [cachedObservers removeAllObjects]; + [self removeObservers]; [theDecoder close]; theDecoder = nil; } } +- (void)registerObservers { + [theDecoder addObserver:self + forKeyPath:@"properties" + options:(NSKeyValueObservingOptionNew) + context:NULL]; + + [theDecoder addObserver:self + forKeyPath:@"metadata" + options:(NSKeyValueObservingOptionNew) + context:NULL]; +} + +- (void)removeObservers { + [theDecoder removeObserver:self forKeyPath:@"properties"]; + [theDecoder removeObserver:self forKeyPath:@"metadata"]; +} + - (BOOL)setTrack:(NSURL *)track { if(theDecoder != nil && [theDecoder respondsToSelector:@selector(setTrack:)]) return [theDecoder setTrack:track]; return NO; } -/* By the current design, the core adds its observers to decoders before they are opened */ -- (void)addObserver:(NSObject *)observer forKeyPath:(NSString *)keyPath options:(NSKeyValueObservingOptions)options context:(void *)context { - if(context != nil) { - [cachedObservers addObject:[NSDictionary dictionaryWithObjectsAndKeys:observer, @"observer", keyPath, @"keyPath", @(options), @"options", context, @"context", nil]]; - } else { - [cachedObservers addObject:[NSDictionary dictionaryWithObjectsAndKeys:observer, @"observer", keyPath, @"keyPath", @(options), @"options", nil]]; - } - if(theDecoder) { - [theDecoder addObserver:observer forKeyPath:keyPath options:options context:context]; - } -} - -/* And this is currently called after the decoder is closed */ -- (void)removeObserver:(NSObject *)observer forKeyPath:(NSString *)keyPath { - for(NSDictionary *obsItem in cachedObservers) { - if([obsItem objectForKey:@"observer"] == observer && [keyPath isEqualToString:[obsItem objectForKey:@"keyPath"]]) { - [cachedObservers removeObject:obsItem]; - break; - } - } - if(theDecoder) { - [theDecoder removeObserver:observer forKeyPath:keyPath]; - } +- (void)observeValueForKeyPath:(NSString *)keyPath + ofObject:(id)object + change:(NSDictionary *)change + context:(void *)context { + [self willChangeValueForKey:keyPath]; + [self didChangeValueForKey:keyPath]; } @end diff --git a/Audio/Output/OutputCoreAudio.m b/Audio/Output/OutputCoreAudio.m index 637d284bf..9a2bc60e3 100644 --- a/Audio/Output/OutputCoreAudio.m +++ b/Audio/Output/OutputCoreAudio.m @@ -666,6 +666,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const if(logFile) { fwrite(inputData->mBuffers[0].mData, 1, inputData->mBuffers[0].mDataByteSize, logFile); } + + // memset(inputData->mBuffers[0].mData, 0, inputData->mBuffers[0].mDataByteSize); #endif inputData->mBuffers[0].mNumberChannels = channels; diff --git a/Plugins/CoreAudio/CoreAudioDecoder.m b/Plugins/CoreAudio/CoreAudioDecoder.m index 489a69fbe..3fc5e9908 100644 --- a/Plugins/CoreAudio/CoreAudioDecoder.m +++ b/Plugins/CoreAudio/CoreAudioDecoder.m @@ -391,7 +391,7 @@ static SInt64 getSizeProc(void *clientData) { - (NSDictionary *)properties { return [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithInt:channels], @"channels", - [NSNumber numberWithInt:channelConfig], @"channelConfig", + [NSNumber numberWithUnsignedInt:channelConfig], @"channelConfig", [NSNumber 
numberWithInt:bitsPerSample], @"bitsPerSample", [NSNumber numberWithBool:floatingPoint], @"floatingPoint", [NSNumber numberWithInt:bitrate], @"bitrate", diff --git a/Plugins/CueSheet/CueSheetDecoder.h b/Plugins/CueSheet/CueSheetDecoder.h index 2b1a396a5..d6fccdd01 100644 --- a/Plugins/CueSheet/CueSheetDecoder.h +++ b/Plugins/CueSheet/CueSheetDecoder.h @@ -26,6 +26,7 @@ BOOL embedded; BOOL noFragment; + BOOL observersAdded; NSURL *baseURL; CueSheet *cuesheet; diff --git a/Plugins/CueSheet/CueSheetDecoder.m b/Plugins/CueSheet/CueSheetDecoder.m index 6e3682ce1..d7ae86b8b 100644 --- a/Plugins/CueSheet/CueSheetDecoder.m +++ b/Plugins/CueSheet/CueSheetDecoder.m @@ -55,6 +55,7 @@ NSDictionary *fileMetadata; noFragment = NO; + observersAdded = NO; NSString *ext = [url pathExtension]; if([ext caseInsensitiveCompare:@"cue"] != NSOrderedSame) { @@ -143,6 +144,8 @@ decoder = [NSClassFromString(@"AudioDecoder") audioDecoderForSource:source skipCue:YES]; + [self registerObservers]; + if(![decoder open:source]) { ALog(@"Could not open cuesheet decoder"); return NO; @@ -166,8 +169,40 @@ return NO; } +- (void)registerObservers { + DLog(@"REGISTERING OBSERVERS"); + [decoder addObserver:self + forKeyPath:@"properties" + options:(NSKeyValueObservingOptionNew) + context:NULL]; + + [decoder addObserver:self + forKeyPath:@"metadata" + options:(NSKeyValueObservingOptionNew) + context:NULL]; + + observersAdded = YES; +} + +- (void)removeObservers { + if(observersAdded) { + [decoder removeObserver:self forKeyPath:@"properties"]; + [decoder removeObserver:self forKeyPath:@"metadata"]; + observersAdded = NO; + } +} + +- (void)observeValueForKeyPath:(NSString *)keyPath + ofObject:(id)object + change:(NSDictionary *)change + context:(void *)context { + [self willChangeValueForKey:keyPath]; + [self didChangeValueForKey:keyPath]; +} + - (void)close { if(decoder) { + [self removeObservers]; [decoder close]; decoder = nil; } diff --git a/Plugins/FFMPEG/FFMPEGDecoder.m b/Plugins/FFMPEG/FFMPEGDecoder.m index d060c0242..0914c1f68 100644 --- a/Plugins/FFMPEG/FFMPEGDecoder.m +++ b/Plugins/FFMPEG/FFMPEGDecoder.m @@ -575,6 +575,20 @@ int64_t ffmpeg_seek(void *opaque, int64_t offset, int whence) { bytesRead += toConsume; } + int _channels = codecCtx->channels; + uint32_t _channelConfig = (uint32_t)codecCtx->channel_layout; + float _frequency = codecCtx->sample_rate; + + if(_channels != channels || + _channelConfig != channelConfig || + _frequency != frequency) { + channels = _channels; + channelConfig = _channelConfig; + frequency = _frequency; + [self willChangeValueForKey:@"properties"]; + [self didChangeValueForKey:@"properties"]; + } + int framesReadNow = bytesRead / frameSize; if(totalFrames && (framesRead + framesReadNow > totalFrames)) framesReadNow = (int)(totalFrames - framesRead); @@ -617,7 +631,7 @@ int64_t ffmpeg_seek(void *opaque, int64_t offset, int whence) { - (NSDictionary *)properties { return [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithInt:channels], @"channels", - [NSNumber numberWithInt:channelConfig], @"channelConfig", + [NSNumber numberWithUnsignedInt:channelConfig], @"channelConfig", [NSNumber numberWithInt:bitsPerSample], @"bitsPerSample", [NSNumber numberWithBool:(bitsPerSample == 8)], @"Unsigned", [NSNumber numberWithFloat:frequency], @"sampleRate", diff --git a/Plugins/Flac/FlacDecoder.h b/Plugins/Flac/FlacDecoder.h index 2d75f2e8f..d7dc4f457 100644 --- a/Plugins/Flac/FlacDecoder.h +++ b/Plugins/Flac/FlacDecoder.h @@ -33,6 +33,7 @@ long fileSize; BOOL hasStreamInfo; + BOOL 
streamOpened; } - (void)setSource:(id)s; diff --git a/Plugins/Flac/FlacDecoder.m b/Plugins/Flac/FlacDecoder.m index 5ccdccfb1..1ce584ead 100644 --- a/Plugins/Flac/FlacDecoder.m +++ b/Plugins/Flac/FlacDecoder.m @@ -99,6 +99,23 @@ FLAC__StreamDecoderLengthStatus LengthCallback(const FLAC__StreamDecoder *decode FLAC__StreamDecoderWriteStatus WriteCallback(const FLAC__StreamDecoder *decoder, const FLAC__Frame *frame, const FLAC__int32 *const sampleblockBuffer[], void *client_data) { FlacDecoder *flacDecoder = (__bridge FlacDecoder *)client_data; + uint32_t channels = frame->header.channels; + uint32_t bitsPerSample = frame->header.bits_per_sample; + uint32_t frequency = frame->header.sample_rate; + + if(channels != flacDecoder->channels || + bitsPerSample != flacDecoder->bitsPerSample || + frequency != flacDecoder->frequency) { + if(channels != flacDecoder->channels) { + flacDecoder->channelConfig = 0; + } + flacDecoder->channels = channels; + flacDecoder->bitsPerSample = bitsPerSample; + flacDecoder->frequency = frequency; + [flacDecoder willChangeValueForKey:@"properties"]; + [flacDecoder didChangeValueForKey:@"properties"]; + } + void *blockBuffer = [flacDecoder blockBuffer]; int8_t *alias8; @@ -185,6 +202,7 @@ void MetadataCallback(const FLAC__StreamDecoder *decoder, const FLAC__StreamMeta if(!flacDecoder->hasStreamInfo) { flacDecoder->channels = metadata->data.stream_info.channels; + flacDecoder->channelConfig = 0; flacDecoder->frequency = metadata->data.stream_info.sample_rate; flacDecoder->bitsPerSample = metadata->data.stream_info.bits_per_sample; @@ -243,9 +261,12 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS - (int)readAudio:(void *)buffer frames:(UInt32)frames { int framesRead = 0; - int bytesPerFrame = ((bitsPerSample + 7) / 8) * channels; while(framesRead < frames) { if(blockBufferFrames == 0) { + if(framesRead) { + break; + } + if(FLAC__stream_decoder_get_state(decoder) == FLAC__STREAM_DECODER_END_OF_STREAM) { break; } @@ -253,6 +274,8 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS FLAC__stream_decoder_process_single(decoder); } + int bytesPerFrame = ((bitsPerSample + 7) / 8) * channels; + int framesToRead = blockBufferFrames; if(blockBufferFrames > frames) { framesToRead = frames; @@ -333,7 +356,7 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS - (NSDictionary *)properties { return [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithInt:channels], @"channels", - [NSNumber numberWithInt:channelConfig], @"channelConfig", + [NSNumber numberWithUnsignedInt:channelConfig], @"channelConfig", [NSNumber numberWithInt:bitsPerSample], @"bitsPerSample", [NSNumber numberWithFloat:frequency], @"sampleRate", [NSNumber numberWithDouble:totalFrames], @"totalFrames", diff --git a/Plugins/WavPack/WavPackDecoder.m b/Plugins/WavPack/WavPackDecoder.m index ccdabbdfc..a1791e7fb 100644 --- a/Plugins/WavPack/WavPackDecoder.m +++ b/Plugins/WavPack/WavPackDecoder.m @@ -283,7 +283,7 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) { - (NSDictionary *)properties { return [NSDictionary dictionaryWithObjectsAndKeys: [NSNumber numberWithInt:channels], @"channels", - [NSNumber numberWithInt:channelConfig], @"channelConfig", + [NSNumber numberWithUnsignedInt:channelConfig], @"channelConfig", [NSNumber numberWithInt:bitsPerSample], @"bitsPerSample", [NSNumber numberWithInt:bitrate], @"bitrate", [NSNumber numberWithFloat:frequency], @"sampleRate",
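The heart of this change is the peek-before-read check in ConverterNode's conversion loop: before consuming the next chunk, the converter peeks at that chunk's AudioStreamBasicDescription and channel config through the new -peekFormat:channelConfig: path, and if either differs from the format it was configured with, it stops filling its input buffer, extrapolates from the last chunk it kept (lastChunkIn) so the resampler can be flushed without a gap, and rebuilds itself with the new format. Below is a minimal sketch of just the comparison step, assuming macOS's CoreAudio headers are available; the standalone helper format_change_pending is illustrative and is not part of this patch.

/*
 * Illustrative sketch only -- not code from this patch. It mirrors the check
 * ConverterNode.m performs on the result of -peekFormat:channelConfig:,
 * namely a memcmp of the whole AudioStreamBasicDescription plus a separate
 * channel-config comparison.
 */
#include <CoreAudio/CoreAudioTypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/*
 * Returns true when the converter should stop reading, extrapolate its
 * resampler tail, and set itself up again with the peeked format before
 * consuming the next chunk. *currentConfig may be updated in place for the
 * benign case where only a previously unknown channel layout becomes known.
 */
static bool format_change_pending(const AudioStreamBasicDescription *current,
                                  uint32_t *currentConfig,
                                  const AudioStreamBasicDescription *peeked,
                                  uint32_t peekedConfig) {
	bool sameFormat = (memcmp(peeked, current, sizeof(*peeked)) == 0);
	if(sameFormat && peekedConfig == *currentConfig)
		return false; /* nothing changed, keep converting */
	if(sameFormat && *currentConfig == 0) {
		*currentConfig = peekedConfig; /* layout was unset; adopt it and continue */
		return false;
	}
	return true; /* genuine stream format change: tear down and set up again */
}

In the patch itself this logic lives inline in ConverterNode.m, where a detected change sets streamFormatChanged and defers the -cleanUp / -setupWithInputFormat: cycle to the top of the conversion loop.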