From 7994929a801761589c0134f9965bf57ecdbc7f59 Mon Sep 17 00:00:00 2001 From: Christopher Snowhill Date: Wed, 12 Feb 2025 04:41:11 -0800 Subject: [PATCH] Audio: Add full timestamp accounting to playback Audio Chunks now have full timestamp accounting, including DSP playback speed ratio for the one DSP that can change play ratio, Rubber Band. Inputs which support looping and actually reporting the absolute play position now do so. Signed-off-by: Christopher Snowhill --- Audio/Chain/AudioChunk.h | 5 +++ Audio/Chain/AudioChunk.m | 13 +++++++ Audio/Chain/ChunkList.h | 4 +++ Audio/Chain/ChunkList.m | 35 +++++++++++++++++-- Audio/Chain/ConverterNode.h | 2 ++ Audio/Chain/ConverterNode.m | 9 +++++ Audio/Chain/DSP/DSPEqualizerNode.m | 18 +++++++++- Audio/Chain/DSP/DSPFSurroundNode.m | 16 +++++++++ Audio/Chain/DSP/DSPHRTFNode.m | 5 +++ Audio/Chain/DSP/DSPRubberbandNode.m | 5 +++ Audio/Chain/Node.h | 1 + Audio/Chain/Node.m | 16 +++++++++ Audio/Chain/OutputNode.h | 1 + Audio/Chain/OutputNode.m | 9 +++++ Audio/Output/OutputCoreAudio.h | 2 +- Audio/Output/OutputCoreAudio.m | 14 ++------ Plugins/AdPlug/AdPlug/AdPlugDecoder.mm | 3 ++ Plugins/CoreAudio/CoreAudioDecoder.h | 1 + Plugins/CoreAudio/CoreAudioDecoder.m | 8 +++++ Plugins/CueSheet/CueSheetDecoder.m | 3 ++ Plugins/FFMPEG/FFMPEGDecoder.m | 3 ++ Plugins/Flac/FlacDecoder.h | 2 ++ Plugins/Flac/FlacDecoder.m | 12 +++++++ Plugins/GME/GameDecoder.m | 4 +++ .../HighlyComplete/HCDecoder.mm | 3 ++ Plugins/Hively/Hively/HVLDecoder.m | 3 ++ Plugins/MAD/MADDecoder.h | 2 ++ Plugins/MAD/MADDecoder.m | 6 ++++ Plugins/MIDI/MIDI/MIDIDecoder.mm | 5 +++ Plugins/MIDI/MIDI/MIDIPlayer.cpp | 4 +++ Plugins/MIDI/MIDI/MIDIPlayer.h | 1 + Plugins/Musepack/MusepackDecoder.h | 1 + Plugins/Musepack/MusepackDecoder.m | 8 +++++ Plugins/OpenMPT/OpenMPT/OMPTDecoder.mm | 3 ++ Plugins/Opus/Opus/OpusDecoder.h | 1 + Plugins/Opus/Opus/OpusDecoder.m | 9 ++++- Plugins/Organya/OrganyaDecoder.mm | 8 +++-- Plugins/Shorten/ShortenDecoder.h | 2 ++ Plugins/Shorten/ShortenDecoder.mm | 7 ++++ .../SilenceDecoder/SilenceDecoder.h | 4 ++- .../SilenceDecoder/SilenceDecoder.m | 6 ++++ Plugins/Vorbis/VorbisDecoder.h | 1 + Plugins/Vorbis/VorbisDecoder.m | 6 ++++ Plugins/WavPack/WavPackDecoder.h | 1 + Plugins/WavPack/WavPackDecoder.m | 7 ++++ Plugins/libvgmPlayer/libvgmDecoder.mm | 3 ++ Plugins/sidplay/SidDecoder.mm | 4 +++ Plugins/vgmstream/vgmstream/VGMDecoder.m | 3 ++ 48 files changed, 269 insertions(+), 20 deletions(-) diff --git a/Audio/Chain/AudioChunk.h b/Audio/Chain/AudioChunk.h index bc56986f2..0ad68ac23 100644 --- a/Audio/Chain/AudioChunk.h +++ b/Audio/Chain/AudioChunk.h @@ -65,6 +65,8 @@ enum { AudioStreamBasicDescription format; NSMutableData *chunkData; uint32_t channelConfig; + double streamTimestamp; + double streamTimeRatio; BOOL formatAssigned; BOOL lossless; BOOL hdcd; @@ -72,6 +74,8 @@ enum { @property AudioStreamBasicDescription format; @property uint32_t channelConfig; +@property double streamTimestamp; +@property double streamTimeRatio; @property BOOL lossless; + (uint32_t)guessChannelConfig:(uint32_t)channelCount; @@ -94,6 +98,7 @@ enum { - (void)setFrameCount:(size_t)count; // For truncation only - (double)duration; +- (double)durationRatioed; - (BOOL)isHDCD; - (void)setHDCD; diff --git a/Audio/Chain/AudioChunk.m b/Audio/Chain/AudioChunk.m index 679864b60..936584d6b 100644 --- a/Audio/Chain/AudioChunk.m +++ b/Audio/Chain/AudioChunk.m @@ -19,6 +19,8 @@ formatAssigned = NO; lossless = NO; hdcd = NO; + streamTimestamp = 0.0; + streamTimeRatio = 1.0; } return self; @@ -31,6 +33,9 @@ 
chunkData = [[NSMutableData alloc] init]; [self setFormat:propertiesToASBD(properties)]; lossless = [[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]; + hdcd = NO; + streamTimestamp = 0.0; + streamTimeRatio = 1.0; } return self; @@ -117,6 +122,8 @@ static const uint32_t AudioChannelConfigTable[] = { } @synthesize lossless; +@synthesize streamTimestamp; +@synthesize streamTimeRatio; - (AudioStreamBasicDescription)format { return format; @@ -155,10 +162,12 @@ static const uint32_t AudioChannelConfigTable[] = { - (NSData *)removeSamples:(size_t)frameCount { if(formatAssigned) { @autoreleasepool { + const double framesDuration = (double)(frameCount) / format.mSampleRate; const size_t bytesPerPacket = format.mBytesPerPacket; const size_t byteCount = bytesPerPacket * frameCount; NSData *ret = [chunkData subdataWithRange:NSMakeRange(0, byteCount)]; [chunkData replaceBytesInRange:NSMakeRange(0, byteCount) withBytes:NULL length:0]; + streamTimestamp += framesDuration * streamTimeRatio; return ret; } } @@ -196,6 +205,10 @@ static const uint32_t AudioChannelConfigTable[] = { return 0.0; } +- (double)durationRatioed { + return [self duration] * streamTimeRatio; +} + - (BOOL)isHDCD { return hdcd; } diff --git a/Audio/Chain/ChunkList.h b/Audio/Chain/ChunkList.h index ffe24fc50..6ecaae103 100644 --- a/Audio/Chain/ChunkList.h +++ b/Audio/Chain/ChunkList.h @@ -19,6 +19,7 @@ NS_ASSUME_NONNULL_BEGIN @interface ChunkList : NSObject { NSMutableArray *chunkList; double listDuration; + double listDurationRatioed; double maxDuration; BOOL inAdder; @@ -53,6 +54,7 @@ NS_ASSUME_NONNULL_BEGIN } @property(readonly) double listDuration; +@property(readonly) double listDurationRatioed; @property(readonly) double maxDuration; - (id)initWithMaximumDuration:(double)duration; @@ -69,6 +71,8 @@ NS_ASSUME_NONNULL_BEGIN - (BOOL)peekFormat:(nonnull AudioStreamBasicDescription *)format channelConfig:(nonnull uint32_t *)config; +- (BOOL)peekTimestamp:(nonnull double *)timestamp timeRatio:(nonnull double *)timeRatio; + @end NS_ASSUME_NONNULL_END diff --git a/Audio/Chain/ChunkList.m b/Audio/Chain/ChunkList.m index b69458b01..11f0afd7b 100644 --- a/Audio/Chain/ChunkList.m +++ b/Audio/Chain/ChunkList.m @@ -369,6 +369,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes @implementation ChunkList @synthesize listDuration; +@synthesize listDurationRatioed; @synthesize maxDuration; - (id)initWithMaximumDuration:(double)duration { @@ -377,6 +378,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes if(self) { chunkList = [[NSMutableArray alloc] init]; listDuration = 0.0; + listDurationRatioed = 0.0; maxDuration = duration; inAdder = NO; @@ -394,9 +396,9 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes dsd2pcmCount = 0; dsd2pcmLatency = 0; #endif - + halveDSDVolume = NO; - + [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.halveDSDVolume" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kChunkListContext]; } @@ -463,10 +465,12 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes inAdder = YES; const double chunkDuration = [chunk duration]; + const double chunkDurationRatioed = [chunk durationRatioed]; @synchronized(chunkList) { [chunkList addObject:chunk]; listDuration += chunkDuration; + listDurationRatioed += chunkDurationRatioed; } inAdder = NO; @@ -487,6 +491,7 @@ static void convert_be_to_le(uint8_t *buffer, 
size_t bitsPerSample, size_t bytes if([chunk frameCount] <= maxFrameCount) { [chunkList removeObjectAtIndex:0]; listDuration -= [chunk duration]; + listDurationRatioed -= [chunk durationRatioed]; inRemover = NO; return chunk; } @@ -495,8 +500,11 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes [ret setFormat:[chunk format]]; [ret setChannelConfig:[chunk channelConfig]]; [ret setLossless:[chunk lossless]]; + [ret setStreamTimestamp:[chunk streamTimestamp]]; + [ret setStreamTimeRatio:[chunk streamTimeRatio]]; [ret assignData:removedData]; listDuration -= [ret duration]; + listDurationRatioed -= [ret durationRatioed]; inRemover = NO; return ret; } @@ -523,6 +531,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes if([chunk frameCount] <= maxFrameCount) { [chunkList removeObjectAtIndex:0]; listDuration -= [chunk duration]; + listDurationRatioed -= [chunk durationRatioed]; inRemover = NO; return [self convertChunk:chunk]; } @@ -531,8 +540,11 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes [ret setFormat:[chunk format]]; [ret setChannelConfig:[chunk channelConfig]]; [ret setLossless:[chunk lossless]]; + [ret setStreamTimestamp:[chunk streamTimestamp]]; + [ret setStreamTimeRatio:[chunk streamTimeRatio]]; [ret assignData:removedData]; listDuration -= [ret duration]; + listDurationRatioed -= [ret durationRatioed]; inRemover = NO; return [self convertChunk:ret]; } @@ -607,6 +619,8 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes size_t bitsPerSample = inputFormat.mBitsPerChannel; BOOL isBigEndian = !!(inputFormat.mFormatFlags & kAudioFormatFlagIsBigEndian); + double streamTimestamp = [inChunk streamTimestamp]; + NSData *inputData = [inChunk removeSamples:samplesRead]; #if DSD_DECIMATE @@ -772,6 +786,8 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes [outChunk setFormat:floatFormat]; [outChunk setChannelConfig:inputChannelConfig]; [outChunk setLossless:inputLossless]; + [outChunk setStreamTimestamp:streamTimestamp]; + [outChunk setStreamTimeRatio:[inChunk streamTimeRatio]]; if(hdcdSustained) [outChunk setHDCD]; [outChunk assignSamples:inputBuffer frameCount:bytesReadFromInput / floatFormat.mBytesPerPacket]; @@ -792,4 +808,19 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes return NO; } +- (BOOL)peekTimestamp:(double *)timestamp timeRatio:(double *)timeRatio { + if(stopping) return NO; + @synchronized (chunkList) { + if([chunkList count]) { + AudioChunk *chunk = [chunkList objectAtIndex:0]; + *timestamp = [chunk streamTimestamp]; + *timeRatio = [chunk streamTimeRatio]; + return YES; + } + } + *timestamp = 0.0; + *timeRatio = 1.0; + return NO; +} + @end diff --git a/Audio/Chain/ConverterNode.h b/Audio/Chain/ConverterNode.h index b96a70ba3..d5b02adfb 100644 --- a/Audio/Chain/ConverterNode.h +++ b/Audio/Chain/ConverterNode.h @@ -25,6 +25,8 @@ size_t inputBufferSize; size_t inpSize, inpOffset; + double streamTimestamp, streamTimeRatio; + BOOL stopping; BOOL convertEntered; BOOL paused; diff --git a/Audio/Chain/ConverterNode.m b/Audio/Chain/ConverterNode.m index 159d69c8c..38cc1bae6 100644 --- a/Audio/Chain/ConverterNode.m +++ b/Audio/Chain/ConverterNode.m @@ -133,6 +133,12 @@ void scale_by_volume(float *buffer, size_t count, float volume) { return nil; } + if(inpOffset == inpSize) { + streamTimestamp = 0.0; + streamTimeRatio = 1.0; + [self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]; 
+ } + while(inpOffset == inpSize) { // Approximately the most we want on input ioNumberPackets = 4096; @@ -315,7 +321,10 @@ void scale_by_volume(float *buffer, size_t count, float volume) { [chunk setChannelConfig:nodeChannelConfig]; } scale_by_volume(floatBuffer, ioNumberPackets / sizeof(float), volumeScale); + [chunk setStreamTimestamp:streamTimestamp]; + [chunk setStreamTimeRatio:streamTimeRatio]; [chunk assignSamples:floatBuffer frameCount:ioNumberPackets / floatFormat.mBytesPerPacket]; + streamTimestamp += [chunk durationRatioed]; convertEntered = NO; return chunk; } diff --git a/Audio/Chain/DSP/DSPEqualizerNode.m b/Audio/Chain/DSP/DSPEqualizerNode.m index 53f03ac6f..f2f29f09f 100644 --- a/Audio/Chain/DSP/DSPEqualizerNode.m +++ b/Audio/Chain/DSP/DSPEqualizerNode.m @@ -354,6 +354,13 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA return nil; } + double streamTimestamp; + double streamTimeRatio; + if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) { + processEntered = NO; + return nil; + } + if((enableEqualizer && !equalizerInitialized) || memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 || inputChannelConfig != lastInputChannelConfig) { @@ -376,7 +383,9 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA samplePtr = &inBuffer[0]; size_t channels = inputFormat.mChannelsPerFrame; - + + BOOL isHDCD = NO; + while(!stopping && totalFrameCount < 4096) { AudioStreamBasicDescription newInputFormat; uint32_t newChannelConfig; @@ -391,6 +400,10 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA break; } + if([chunk isHDCD]) { + isHDCD = YES; + } + size_t frameCount = [chunk frameCount]; NSData *sampleData = [chunk removeSamples:frameCount]; @@ -437,6 +450,9 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA if(outputChannelConfig) { [outputChunk setChannelConfig:inputChannelConfig]; } + if(isHDCD) [outputChunk setHDCD]; + [outputChunk setStreamTimestamp:streamTimestamp]; + [outputChunk setStreamTimeRatio:streamTimeRatio]; [outputChunk assignSamples:&outBuffer[0] frameCount:totalFrameCount]; } diff --git a/Audio/Chain/DSP/DSPFSurroundNode.m b/Audio/Chain/DSP/DSPFSurroundNode.m index cb3318339..c0e4bbf02 100644 --- a/Audio/Chain/DSP/DSPFSurroundNode.m +++ b/Audio/Chain/DSP/DSPFSurroundNode.m @@ -173,6 +173,13 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext; return nil; } + double streamTimestamp; + double streamTimeRatio; + if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) { + processEntered = NO; + return nil; + } + if((enableFSurround && !fsurround) || memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 || inputChannelConfig != lastInputChannelConfig) { @@ -197,6 +204,8 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext; float *samplePtr = resetStreamFormat ? 
&inBuffer[2048 * 2] : &inBuffer[0]; + BOOL isHDCD = NO; + while(!stopping && totalFrameCount < totalRequestedSamples) { AudioStreamBasicDescription newInputFormat; uint32_t newChannelConfig; @@ -211,6 +220,10 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext; break; } + if([chunk isHDCD]) { + isHDCD = YES; + } + size_t frameCount = [chunk frameCount]; NSData *sampleData = [chunk removeSamples:frameCount]; @@ -262,6 +275,9 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext; if(outputChannelConfig) { [outputChunk setChannelConfig:outputChannelConfig]; } + if(isHDCD) [outputChunk setHDCD]; + [outputChunk setStreamTimestamp:streamTimestamp]; + [outputChunk setStreamTimeRatio:streamTimeRatio]; [outputChunk assignSamples:samplePtr frameCount:samplesRendered]; } diff --git a/Audio/Chain/DSP/DSPHRTFNode.m b/Audio/Chain/DSP/DSPHRTFNode.m index d46d7e9dd..68ff41c45 100644 --- a/Audio/Chain/DSP/DSPHRTFNode.m +++ b/Audio/Chain/DSP/DSPHRTFNode.m @@ -336,6 +336,8 @@ static void unregisterMotionListener(void) { [hrtf reloadWithMatrix:matrix]; } + double streamTimestamp = [chunk streamTimestamp]; + size_t frameCount = [chunk frameCount]; NSData *sampleData = [chunk removeSamples:frameCount]; @@ -346,6 +348,9 @@ static void unregisterMotionListener(void) { if(outputChannelConfig) { [outputChunk setChannelConfig:outputChannelConfig]; } + if([chunk isHDCD]) [outputChunk setHDCD]; + [outputChunk setStreamTimestamp:streamTimestamp]; + [outputChunk setStreamTimeRatio:[chunk streamTimeRatio]]; [outputChunk assignSamples:&outBuffer[0] frameCount:frameCount]; processEntered = NO; diff --git a/Audio/Chain/DSP/DSPRubberbandNode.m b/Audio/Chain/DSP/DSPRubberbandNode.m index e1d8eba6a..7c9c0417e 100644 --- a/Audio/Chain/DSP/DSPRubberbandNode.m +++ b/Audio/Chain/DSP/DSPRubberbandNode.m @@ -400,6 +400,8 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext; size_t frameCount = [chunk frameCount]; + double streamTimestamp = [chunk streamTimestamp]; + int len = (int)frameCount; int channels = (int)(inputFormat.mChannelsPerFrame); NSData *samples = [chunk removeSamples:frameCount]; @@ -459,6 +461,9 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext; if(inputChannelConfig) { [outputChunk setChannelConfig:inputChannelConfig]; } + if([chunk isHDCD]) [outputChunk setHDCD]; + [outputChunk setStreamTimestamp:streamTimestamp]; + [outputChunk setStreamTimeRatio:[chunk streamTimeRatio] * tempo]; [outputChunk assignSamples:rsOutBuffer frameCount:samplesBuffered]; samplesBuffered = 0; stretchOut += [outputChunk duration]; diff --git a/Audio/Chain/Node.h b/Audio/Chain/Node.h index adcec5cde..bf41068dc 100644 --- a/Audio/Chain/Node.h +++ b/Audio/Chain/Node.h @@ -44,6 +44,7 @@ - (AudioChunk *_Nonnull)readChunkAsFloat32:(size_t)maxFrames; - (BOOL)peekFormat:(AudioStreamBasicDescription *_Nonnull)format channelConfig:(uint32_t *_Nonnull)config; +- (BOOL)peekTimestamp:(double *_Nonnull)timestamp timeRatio:(double *_Nonnull)timeRatio; - (void)process; // Should be overwriten by subclass - (void)threadEntry:(id _Nullable)arg; diff --git a/Audio/Chain/Node.m b/Audio/Chain/Node.m index 919ec1b91..f9a75aa87 100644 --- a/Audio/Chain/Node.m +++ b/Audio/Chain/Node.m @@ -153,6 +153,22 @@ return ret; } +- (BOOL)peekTimestamp:(double *_Nonnull)timestamp timeRatio:(double *_Nonnull)timeRatio { + [accessLock lock]; + + if([[previousNode buffer] isEmpty] && [previousNode endOfStream] == YES) { + endOfStream = YES; + [accessLock unlock]; + return NO; + } + + BOOL ret = 
[[previousNode buffer] peekTimestamp:timestamp timeRatio:timeRatio]; + + [accessLock unlock]; + + return ret; +} + - (AudioChunk *)readChunk:(size_t)maxFrames { [accessLock lock]; diff --git a/Audio/Chain/OutputNode.h b/Audio/Chain/OutputNode.h index 6fa6a96d8..014e072f0 100644 --- a/Audio/Chain/OutputNode.h +++ b/Audio/Chain/OutputNode.h @@ -32,6 +32,7 @@ - (double)amountPlayedInterval; - (void)incrementAmountPlayed:(double)seconds; +- (void)setAmountPlayed:(double)seconds; - (void)resetAmountPlayed; - (void)resetAmountPlayedInterval; diff --git a/Audio/Chain/OutputNode.m b/Audio/Chain/OutputNode.m index ad0474d85..2d9eef81a 100644 --- a/Audio/Chain/OutputNode.m +++ b/Audio/Chain/OutputNode.m @@ -59,6 +59,15 @@ } } +- (void)setAmountPlayed:(double)seconds { + double delta = seconds - amountPlayed; + if(delta > 0.0 && delta < 5.0) { + [self incrementAmountPlayed:delta]; + } else { + amountPlayed = seconds; + } +} + - (void)resetAmountPlayed { amountPlayed = 0; } diff --git a/Audio/Output/OutputCoreAudio.h b/Audio/Output/OutputCoreAudio.h index 9a2a03dda..5d062ecad 100644 --- a/Audio/Output/OutputCoreAudio.h +++ b/Audio/Output/OutputCoreAudio.h @@ -50,7 +50,7 @@ using std::atomic_long; double secondsLatency; double visPushed; - double tempo; + double streamTimestamp; double lastClippedSampleRate; diff --git a/Audio/Output/OutputCoreAudio.m b/Audio/Output/OutputCoreAudio.m index dcc157882..ee9f964a2 100644 --- a/Audio/Output/OutputCoreAudio.m +++ b/Audio/Output/OutputCoreAudio.m @@ -86,6 +86,8 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext; config = [chunk channelConfig]; double chunkDuration = 0; + streamTimestamp = [chunk streamTimestamp] + [chunk durationRatioed]; + if(frameCount) { chunkDuration = [chunk duration]; @@ -211,8 +213,6 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext; secondsHdcdSustained = 0; - tempo = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] doubleForKey:@"tempo"]; - outputLock = [[NSLock alloc] init]; #ifdef OUTPUT_LOG @@ -257,11 +257,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons NSDictionary *device = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"outputDevice"]; [self setOutputDeviceWithDeviceDict:device]; - } else if([keyPath isEqualToString:@"values.eqPreamp"]) { - float preamp = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] floatForKey:@"eqPreamp"]; - eqPreamp = pow(10.0, preamp / 20.0); - } else if([keyPath isEqualToString:@"values.tempo"]) { - tempo = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] doubleForKey:@"tempo"]; } } @@ -846,8 +841,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons visController = [VisualizationController sharedController]; [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.outputDevice" options:0 context:kOutputCoreAudioContext]; - [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.eqPreamp" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kOutputCoreAudioContext]; - [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.tempo" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kOutputCoreAudioContext]; observersapplied = YES; @@ -857,7 +850,7 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons - 
(void)updateLatency:(double)secondsPlayed { if(secondsPlayed > 0) { - [outputController incrementAmountPlayed:secondsPlayed * tempo]; + [outputController setAmountPlayed:streamTimestamp]; } double visLatency = visPushed; visPushed -= secondsPlayed; @@ -895,7 +888,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons stopInvoked = YES; if(observersapplied) { [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.outputDevice" context:kOutputCoreAudioContext]; - [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.tempo" context:kOutputCoreAudioContext]; observersapplied = NO; } stopping = YES; diff --git a/Plugins/AdPlug/AdPlug/AdPlugDecoder.mm b/Plugins/AdPlug/AdPlug/AdPlugDecoder.mm index aa358ae89..1d3abe5eb 100644 --- a/Plugins/AdPlug/AdPlug/AdPlugDecoder.mm +++ b/Plugins/AdPlug/AdPlug/AdPlugDecoder.mm @@ -135,6 +135,9 @@ static CAdPlugDatabase *g_database = NULL; total += samples_now; } + double streamTimestamp = (double)(current_pos) / sampleRate; + [chunk setStreamTimestamp:streamTimestamp]; + [chunk assignSamples:buffer frameCount:total]; return chunk; diff --git a/Plugins/CoreAudio/CoreAudioDecoder.h b/Plugins/CoreAudio/CoreAudioDecoder.h index 39e1d0766..ddeeb1ea4 100644 --- a/Plugins/CoreAudio/CoreAudioDecoder.h +++ b/Plugins/CoreAudio/CoreAudioDecoder.h @@ -46,6 +46,7 @@ uint32_t channelConfig; float frequency; long totalFrames; + long frame; NSString* codec; } diff --git a/Plugins/CoreAudio/CoreAudioDecoder.m b/Plugins/CoreAudio/CoreAudioDecoder.m index e343427fa..7849f9ff0 100644 --- a/Plugins/CoreAudio/CoreAudioDecoder.m +++ b/Plugins/CoreAudio/CoreAudioDecoder.m @@ -150,6 +150,8 @@ static SInt64 getSizeProc(void *clientData) { _in_opened = YES; + frame = 0; + return [self readInfoFromExtAudioFileRef]; } @@ -330,6 +332,10 @@ static SInt64 getSizeProc(void *clientData) { id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + + double streamTimestamp = (double)(frame) / frequency; + [chunk setStreamTimestamp:streamTimestamp]; + [chunk assignSamples:buffer frameCount:frameCount]; return chunk; @@ -343,6 +349,8 @@ static SInt64 getSizeProc(void *clientData) { return -1; } + self->frame = frame; + return frame; } diff --git a/Plugins/CueSheet/CueSheetDecoder.m b/Plugins/CueSheet/CueSheetDecoder.m index 7b4977d33..0016a9380 100644 --- a/Plugins/CueSheet/CueSheetDecoder.m +++ b/Plugins/CueSheet/CueSheetDecoder.m @@ -361,6 +361,9 @@ static void *kCueSheetDecoderContext = &kCueSheetDecoderContext; [chunk setFrameCount:frames / frameScale]; } + double streamTimestamp = (double)(framePosition - trackStart) / [chunk format].mSampleRate; + [chunk setStreamTimestamp:streamTimestamp]; + framePosition += chunk.frameCount * frameScale; return chunk; diff --git a/Plugins/FFMPEG/FFMPEGDecoder.m b/Plugins/FFMPEG/FFMPEGDecoder.m index ed231101e..566f29e6e 100644 --- a/Plugins/FFMPEG/FFMPEGDecoder.m +++ b/Plugins/FFMPEG/FFMPEGDecoder.m @@ -921,6 +921,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va if(totalFrames && (framesRead + framesReadNow > totalFrames)) framesReadNow = (int)(totalFrames - framesRead); + double streamTimestamp = (double)(framesRead) / frequency; + framesRead += framesReadNow; metadataUpdateCount += framesReadNow; @@ -931,6 +933,7 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va id audioChunkClass = 
NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:framesRead]; [chunk assignSamples:sampleBuffer frameCount:framesReadNow]; return chunk; diff --git a/Plugins/Flac/FlacDecoder.h b/Plugins/Flac/FlacDecoder.h index 87c97a949..acf3ae431 100644 --- a/Plugins/Flac/FlacDecoder.h +++ b/Plugins/Flac/FlacDecoder.h @@ -30,6 +30,8 @@ uint32_t channelConfig; float frequency; long totalFrames; + long frame; + double seconds; long fileSize; diff --git a/Plugins/Flac/FlacDecoder.m b/Plugins/Flac/FlacDecoder.m index 6fa8b44d7..bd909bb62 100644 --- a/Plugins/Flac/FlacDecoder.m +++ b/Plugins/Flac/FlacDecoder.m @@ -372,6 +372,9 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS blockBuffer = malloc(SAMPLE_blockBuffer_SIZE); + frame = 0; + seconds = 0.0; + return YES; } @@ -391,8 +394,14 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS if(blockBufferFrames > 0) { chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + + frame += blockBufferFrames; + [chunk setStreamTimestamp:seconds]; + [chunk assignSamples:blockBuffer frameCount:blockBufferFrames]; + seconds += [chunk duration]; + blockBufferFrames = 0; } @@ -453,6 +462,9 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS if(!FLAC__stream_decoder_seek_absolute(decoder, sample)) return -1; + frame = sample; + seconds = (double)(sample) / frequency; + return sample; } diff --git a/Plugins/GME/GameDecoder.m b/Plugins/GME/GameDecoder.m index 374c0a7b3..6c47b2afe 100644 --- a/Plugins/GME/GameDecoder.m +++ b/Plugins/GME/GameDecoder.m @@ -192,6 +192,8 @@ gme_err_t readCallback(void *data, void *out, int count) { else gme_set_fade(emu, (int)(length - fade), (int)fade); + double streamTimestamp = (double)(gme_tell(emu)) * 0.001; + gme_play(emu, numSamples, (short int *)buf); // Some formats support length, but we'll add that in the future. @@ -199,6 +201,8 @@ gme_err_t readCallback(void *data, void *out, int count) { // GME will always generate samples. There's no real EOS. 
// Addendum: The above gme_track_ended() call has been in place for years now + [chunk setStreamTimestamp:streamTimestamp]; + [chunk assignSamples:sampleBuffer frameCount:frames]; return chunk; diff --git a/Plugins/HighlyComplete/HighlyComplete/HCDecoder.mm b/Plugins/HighlyComplete/HighlyComplete/HCDecoder.mm index 1ac433610..2e6499b12 100644 --- a/Plugins/HighlyComplete/HighlyComplete/HCDecoder.mm +++ b/Plugins/HighlyComplete/HighlyComplete/HCDecoder.mm @@ -1362,10 +1362,13 @@ static int usf_info(void *context, const char *name, const char *value) { } } + double streamTimestamp = (double)(framesRead) / (double)(sampleRate); + framesRead += written; id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:buffer frameCount:written]; return chunk; diff --git a/Plugins/Hively/Hively/HVLDecoder.m b/Plugins/Hively/Hively/HVLDecoder.m index 46352b00a..d7fd8c7ee 100644 --- a/Plugins/Hively/Hively/HVLDecoder.m +++ b/Plugins/Hively/Hively/HVLDecoder.m @@ -158,10 +158,13 @@ static void oneTimeInit(void) { total = (int)(fadePos - fadeStart); } + double streamTimestamp = (double)(framesRead) / sampleRate; + framesRead += total; id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:sampleBuffer frameCount:total]; return chunk; diff --git a/Plugins/MAD/MADDecoder.h b/Plugins/MAD/MADDecoder.h index d2ce8a595..95cddf753 100644 --- a/Plugins/MAD/MADDecoder.h +++ b/Plugins/MAD/MADDecoder.h @@ -25,6 +25,8 @@ long _currentOutputFrames; long _fileSize; + double seconds; + id _source; BOOL _firstFrame; diff --git a/Plugins/MAD/MADDecoder.m b/Plugins/MAD/MADDecoder.m index 342a90826..8ceacabab 100644 --- a/Plugins/MAD/MADDecoder.m +++ b/Plugins/MAD/MADDecoder.m @@ -476,6 +476,8 @@ error: _endPadding = 0; // DLog(@"OPEN: %i", _firstFrame); + seconds = 0.0; + inputEOF = NO; genre = @""; @@ -748,7 +750,9 @@ error: if(framesToCopy) { chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:seconds]; [chunk assignSamples:_outputBuffer frameCount:framesToCopy]; + seconds += [chunk duration]; _outputFrames = 0; break; } @@ -812,6 +816,7 @@ error: if(frame < _framesDecoded) { _framesDecoded = 0; + seconds = 0.0; _firstFrame = YES; if(_foundLAMEHeader || _foundiTunSMPB) framesToSkip = _startPadding; @@ -821,6 +826,7 @@ error: } framesToSkip += frame - _framesDecoded; + seconds += (double)(frame - _framesDecoded) / sampleRate; return frame; } diff --git a/Plugins/MIDI/MIDI/MIDIDecoder.mm b/Plugins/MIDI/MIDI/MIDIDecoder.mm index b6e24b435..25476fe99 100644 --- a/Plugins/MIDI/MIDI/MIDIDecoder.mm +++ b/Plugins/MIDI/MIDI/MIDIDecoder.mm @@ -299,6 +299,8 @@ static OSType getOSType(const char *in_) { if(![self initDecoder]) return nil; } + + double streamTimestamp = 0.0; try { player->setLoopMode((repeatone || isLooped) ? 
(MIDIPlayer::loop_mode_enable | MIDIPlayer::loop_mode_force) : 0); @@ -317,6 +319,8 @@ static OSType getOSType(const char *in_) { soundFontsAssigned = YES; } + streamTimestamp = (double)(player->Tell()) / sampleRate; + int frames = 1024; float buffer[frames * 2]; @@ -358,6 +362,7 @@ static OSType getOSType(const char *in_) { id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:buffer frameCount:frames]; return chunk; diff --git a/Plugins/MIDI/MIDI/MIDIPlayer.cpp b/Plugins/MIDI/MIDI/MIDIPlayer.cpp index 7c9846f0e..ce3a125b8 100644 --- a/Plugins/MIDI/MIDI/MIDIPlayer.cpp +++ b/Plugins/MIDI/MIDI/MIDIPlayer.cpp @@ -589,3 +589,7 @@ void MIDIPlayer::send_sysex_time_filtered(const uint8_t *data, size_t size, size bool MIDIPlayer::GetLastError(std::string &p_out) { return get_last_error(p_out); } + +unsigned long MIDIPlayer::Tell() const { + return uTimeCurrent; +} diff --git a/Plugins/MIDI/MIDI/MIDIPlayer.h b/Plugins/MIDI/MIDI/MIDIPlayer.h index 3c1e7f0d8..8e5d01ea1 100644 --- a/Plugins/MIDI/MIDI/MIDIPlayer.h +++ b/Plugins/MIDI/MIDI/MIDIPlayer.h @@ -35,6 +35,7 @@ class MIDIPlayer { bool Load(const midi_container& midi_file, unsigned subsong, unsigned loop_mode, unsigned clean_flags); unsigned long Play(float* out, unsigned long count); void Seek(unsigned long sample); + unsigned long Tell() const; bool GetLastError(std::string& p_out); diff --git a/Plugins/Musepack/MusepackDecoder.h b/Plugins/Musepack/MusepackDecoder.h index a656c3c80..947c5bfdf 100644 --- a/Plugins/Musepack/MusepackDecoder.h +++ b/Plugins/Musepack/MusepackDecoder.h @@ -27,6 +27,7 @@ int bitrate; float frequency; long totalFrames; + long frame; } - (BOOL)writeToBuffer:(float *)sample_buffer fromBuffer:(const MPC_SAMPLE_FORMAT *)p_buffer frames:(unsigned)frames; diff --git a/Plugins/Musepack/MusepackDecoder.m b/Plugins/Musepack/MusepackDecoder.m index 11619edf2..9f4328db9 100644 --- a/Plugins/Musepack/MusepackDecoder.m +++ b/Plugins/Musepack/MusepackDecoder.m @@ -79,6 +79,8 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) { totalFrames = mpc_streaminfo_get_length_samples(&info); + frame = 0; + [self willChangeValueForKey:@"properties"]; [self didChangeValueForKey:@"properties"]; @@ -151,8 +153,12 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) { } } + double streamTimestamp = (double)(frame) / frequency; + frame += framesRead; + id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:floatBuffer frameCount:framesRead]; return chunk; @@ -172,6 +178,8 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) { - (long)seek:(long)sample { mpc_demux_seek_sample(demux, sample); + frame = sample; + return sample; } diff --git a/Plugins/OpenMPT/OpenMPT/OMPTDecoder.mm b/Plugins/OpenMPT/OpenMPT/OMPTDecoder.mm index 23bffa82f..9187c22dc 100644 --- a/Plugins/OpenMPT/OpenMPT/OMPTDecoder.mm +++ b/Plugins/OpenMPT/OpenMPT/OMPTDecoder.mm @@ -120,6 +120,8 @@ static void g_push_archive_extensions(std::vector &list) { try { mod->set_repeat_count(IsRepeatOneSet() ? 
-1 : 0); + double streamTimestamp = mod->get_position_seconds(); + int frames = 1024; float buffer[frames * 2]; void *buf = (void *)buffer; @@ -142,6 +144,7 @@ static void g_push_archive_extensions(std::vector &list) { id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:buffer frameCount:total]; return chunk; diff --git a/Plugins/Opus/Opus/OpusDecoder.h b/Plugins/Opus/Opus/OpusDecoder.h index fb69ee19d..cdf86bf63 100644 --- a/Plugins/Opus/Opus/OpusDecoder.h +++ b/Plugins/Opus/Opus/OpusDecoder.h @@ -22,6 +22,7 @@ int bitrate; int channels; long totalFrames; + long frame; int metadataUpdateInterval; int metadataUpdateCount; diff --git a/Plugins/Opus/Opus/OpusDecoder.m b/Plugins/Opus/Opus/OpusDecoder.m index e5ca355e3..8a8d97464 100644 --- a/Plugins/Opus/Opus/OpusDecoder.m +++ b/Plugins/Opus/Opus/OpusDecoder.m @@ -107,7 +107,8 @@ opus_int64 sourceTell(void *_stream) { seekable = op_seekable(opusRef); totalFrames = op_pcm_total(opusRef, -1); - + frame = 0; + const OpusHead *head = op_head(opusRef, -1); const OpusTags *tags = op_tags(opusRef, -1); @@ -289,8 +290,12 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va [self updateIcyMetadata]; } + double streamTimestamp = (double)(frame) / 48000.0; + frame += total / channels; + id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:buffer frameCount:total / channels]; return chunk; @@ -308,6 +313,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va - (long)seek:(long)frame { op_pcm_seek(opusRef, frame); + self->frame = frame; + return frame; } diff --git a/Plugins/Organya/OrganyaDecoder.mm b/Plugins/Organya/OrganyaDecoder.mm index 775fff698..b1887d4be 100644 --- a/Plugins/Organya/OrganyaDecoder.mm +++ b/Plugins/Organya/OrganyaDecoder.mm @@ -397,8 +397,8 @@ namespace Organya { } - (AudioChunk *)readAudio { - int total = 0; - + double streamTimestamp = (double)(m_song->cur_beat) * (double)(m_song->ms_per_beat) * 0.001; + std::vector samples = m_song->Synth(sampleRate); int rendered = (int)(samples.size() / 2); @@ -423,7 +423,9 @@ namespace Organya { id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; - + + [chunk setStreamTimestamp:streamTimestamp]; + if(samplesDiscard) { [chunk assignSamples:&samples[samplesDiscard * 2] frameCount:rendered - samplesDiscard]; samplesDiscard = 0; diff --git a/Plugins/Shorten/ShortenDecoder.h b/Plugins/Shorten/ShortenDecoder.h index d0b2df4be..787100f1e 100644 --- a/Plugins/Shorten/ShortenDecoder.h +++ b/Plugins/Shorten/ShortenDecoder.h @@ -22,6 +22,8 @@ float frequency; long totalFrames; BOOL seekable; + + double seconds; } @end diff --git a/Plugins/Shorten/ShortenDecoder.mm b/Plugins/Shorten/ShortenDecoder.mm index f1fa809c8..3aa9aa018 100644 --- a/Plugins/Shorten/ShortenDecoder.mm +++ b/Plugins/Shorten/ShortenDecoder.mm @@ -33,6 +33,8 @@ totalFrames = (decoder->shn_get_song_length() * frequency) / 1000.0; + seconds = 0.0; + decoder->go(); [self willChangeValueForKey:@"properties"]; @@ -57,8 +59,12 @@ amountRead = decoder->read(buf, frames * bytesPerFrame); } while(amountRead == -1); + [chunk setStreamTimestamp:seconds]; + [chunk assignSamples:buf 
frameCount:amountRead / bytesPerFrame]; + seconds += [chunk duration]; + return chunk; } @@ -66,6 +72,7 @@ unsigned int sec = sample / frequency; decoder->seek(sec); + seconds = sec; return sample; } diff --git a/Plugins/SilenceDecoder/SilenceDecoder/SilenceDecoder.h b/Plugins/SilenceDecoder/SilenceDecoder/SilenceDecoder.h index 45b57e07d..d051a3bc3 100644 --- a/Plugins/SilenceDecoder/SilenceDecoder/SilenceDecoder.h +++ b/Plugins/SilenceDecoder/SilenceDecoder/SilenceDecoder.h @@ -15,7 +15,9 @@ long length; long remain; - + + double seconds; + float *buffer; } diff --git a/Plugins/SilenceDecoder/SilenceDecoder/SilenceDecoder.m b/Plugins/SilenceDecoder/SilenceDecoder/SilenceDecoder.m index 16cb0030c..1b7db9972 100644 --- a/Plugins/SilenceDecoder/SilenceDecoder/SilenceDecoder.m +++ b/Plugins/SilenceDecoder/SilenceDecoder/SilenceDecoder.m @@ -27,6 +27,7 @@ enum { channels = 2 }; length = seconds * sample_rate; remain = length; + seconds = 0.0; buffer = (float *) calloc(sizeof(float), 1024 * channels); if(!buffer) { @@ -68,8 +69,11 @@ enum { channels = 2 }; id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + [chunk setStreamTimestamp:seconds]; [chunk assignSamples:buffer frameCount:frames]; + seconds += [chunk duration]; + return chunk; } @@ -79,6 +83,8 @@ enum { channels = 2 }; remain = length - frame; + seconds = (double)(frame) / sample_rate; + return frame; } diff --git a/Plugins/Vorbis/VorbisDecoder.h b/Plugins/Vorbis/VorbisDecoder.h index fef512f31..c1d91ba4a 100644 --- a/Plugins/Vorbis/VorbisDecoder.h +++ b/Plugins/Vorbis/VorbisDecoder.h @@ -31,6 +31,7 @@ int channels; float frequency; long totalFrames; + long frame; int metadataUpdateInterval; int metadataUpdateCount; diff --git a/Plugins/Vorbis/VorbisDecoder.m b/Plugins/Vorbis/VorbisDecoder.m index 58d979dbf..544400354 100644 --- a/Plugins/Vorbis/VorbisDecoder.m +++ b/Plugins/Vorbis/VorbisDecoder.m @@ -99,6 +99,7 @@ long sourceTell(void *datasource) { seekable = ov_seekable(&vorbisRef); totalFrames = ov_pcm_total(&vorbisRef, -1); + frame = 0; [self willChangeValueForKey:@"properties"]; [self didChangeValueForKey:@"properties"]; @@ -221,6 +222,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va int total = 0; int frames = 1024; + double streamTimestamp = (double)(frame) / frequency; + if(currentSection != lastSection) { vorbis_info *vi; vi = ov_info(&vorbisRef, -1); @@ -277,6 +280,7 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va [self updateIcyMetadata]; } + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:buffer frameCount:total]; return chunk; @@ -293,6 +297,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va - (long)seek:(long)frame { ov_pcm_seek(&vorbisRef, frame); + self->frame = frame; + return frame; } diff --git a/Plugins/WavPack/WavPackDecoder.h b/Plugins/WavPack/WavPackDecoder.h index e635dfa3e..1ec80d281 100644 --- a/Plugins/WavPack/WavPackDecoder.h +++ b/Plugins/WavPack/WavPackDecoder.h @@ -44,6 +44,7 @@ int bitrate; float frequency; long totalFrames; + long frame; } @end diff --git a/Plugins/WavPack/WavPackDecoder.m b/Plugins/WavPack/WavPackDecoder.m index 99d41b5c1..afb194d7f 100644 --- a/Plugins/WavPack/WavPackDecoder.m +++ b/Plugins/WavPack/WavPackDecoder.m @@ -150,6 +150,7 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) { frequency = WavpackGetSampleRate(wpc); totalFrames = 
WavpackGetNumSamples(wpc); + frame = 0; isDSD = NO; @@ -257,6 +258,10 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) { ALog(@"Unsupported sample size: %d", bitsPerSample); } + double streamTimestamp = (double)(frame) / frequency; + frame += samplesRead; + + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:buffer frameCount:samplesRead]; return chunk; @@ -270,6 +275,8 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) { } WavpackSeekSample(wpc, trueFrame); + self->frame = frame; + return frame; } diff --git a/Plugins/libvgmPlayer/libvgmDecoder.mm b/Plugins/libvgmPlayer/libvgmDecoder.mm index eec9ecb14..36c86045d 100644 --- a/Plugins/libvgmPlayer/libvgmDecoder.mm +++ b/Plugins/libvgmPlayer/libvgmDecoder.mm @@ -231,6 +231,8 @@ const int masterVol = 0x10000; // Fixed point 16.16 mainPlr->SetLoopCount(vgmplay->GetModifiedLoopCount(maxLoops)); } + double streamTimestamp = mainPlr->GetCurTime(0); + UInt32 framesDone = 0; while(framesDone < frames) { @@ -247,6 +249,7 @@ const int masterVol = 0x10000; // Fixed point 16.16 framesDone += framesToDo; } + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:buffer frameCount:framesDone]; return chunk; diff --git a/Plugins/sidplay/SidDecoder.mm b/Plugins/sidplay/SidDecoder.mm index 9bbe4b741..de0ffb0ec 100644 --- a/Plugins/sidplay/SidDecoder.mm +++ b/Plugins/sidplay/SidDecoder.mm @@ -307,6 +307,8 @@ static void sidTuneLoader(const char *fileName, std::vector &bufferRef) id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; + double streamTimestamp = (double)(renderedTotal) / sampleRate; + int16_t buffer[1024 * n_channels]; int framesToRender = 1024; @@ -353,6 +355,8 @@ static void sidTuneLoader(const char *fileName, std::vector &bufferRef) fadeRemain = fadeEnd; } + [chunk setStreamTimestamp:streamTimestamp]; + [chunk assignSamples:buffer frameCount:rendered]; return chunk; diff --git a/Plugins/vgmstream/vgmstream/VGMDecoder.m b/Plugins/vgmstream/vgmstream/VGMDecoder.m index 8bdcda599..5f1290616 100644 --- a/Plugins/vgmstream/vgmstream/VGMDecoder.m +++ b/Plugins/vgmstream/vgmstream/VGMDecoder.m @@ -318,6 +318,8 @@ static NSString *get_description_tag(const char *description, const char *tag, c UInt32 framesMax = frames; UInt32 framesDone = 0; + double streamTimestamp = (double)(stream->pstate.play_position) / sampleRate; + id audioChunkClass = NSClassFromString(@"AudioChunk"); AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]]; @@ -374,6 +376,7 @@ static NSString *get_description_tag(const char *description, const char *tag, c frames -= frames_done; } + [chunk setStreamTimestamp:streamTimestamp]; [chunk assignSamples:sample_buf frameCount:framesDone]; return chunk;
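
Note on the accounting introduced above: each AudioChunk now carries streamTimestamp (seconds in original-stream time) and streamTimeRatio (the product of the speed ratios applied by tempo-changing DSPs, currently only Rubber Band). removeSamples: advances the timestamp by the consumed duration times the ratio, durationRatioed scales a chunk's duration back to stream time, and OutputCoreAudio feeds the head chunk's timestamp plus its ratioed duration into setAmountPlayed:. Below is a minimal standalone sketch of that arithmetic in plain C; the type and function names (chunk_t, chunk_consume, and so on) are illustrative only and are not part of this patch.

#include <stdio.h>

/* Illustrative mirror of AudioChunk's new timestamp fields (not the real type). */
typedef struct {
	double streamTimestamp; /* seconds, in original-stream time */
	double streamTimeRatio; /* product of DSP speed ratios, e.g. Rubber Band tempo */
	double sampleRate;
	long frameCount;
} chunk_t;

/* mirrors [chunk duration] */
static double chunk_duration(const chunk_t *c) {
	return (double)c->frameCount / c->sampleRate;
}

/* mirrors [chunk durationRatioed]: duration scaled back to original-stream seconds */
static double chunk_duration_ratioed(const chunk_t *c) {
	return chunk_duration(c) * c->streamTimeRatio;
}

/* mirrors removeSamples:: consuming frames advances the timestamp by the ratioed duration */
static void chunk_consume(chunk_t *c, long frames) {
	double consumed = (double)frames / c->sampleRate;
	c->streamTimestamp += consumed * c->streamTimeRatio;
	c->frameCount -= frames;
}

int main(void) {
	/* 4096 frames at 44.1 kHz, ten seconds into the stream, time ratio 1.25 */
	chunk_t c = { 10.0, 1.25, 44100.0, 4096 };

	/* what the output's updateLatency path would hand to setAmountPlayed: */
	double playPosition = c.streamTimestamp + chunk_duration_ratioed(&c);
	printf("play position after this chunk: %f s\n", playPosition);

	chunk_consume(&c, 1024);
	printf("timestamp after consuming 1024 frames: %f s\n", c.streamTimestamp);
	return 0;
}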