Audio: Add full timestamp accounting to playback

Audio Chunks now have full timestamp accounting, including the DSP playback
speed ratio for the one DSP that can change the play rate, Rubber Band.
Inputs that support looping and can report the absolute play position
now do so.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
Christopher Snowhill 2025-02-12 04:41:11 -08:00
parent b858a48032
commit ee7aae922d
48 changed files with 269 additions and 20 deletions
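In short, each chunk now carries the absolute source position it starts at (streamTimestamp) and a playback-speed ratio (streamTimeRatio), and the output derives the play position from those instead of accumulating scaled wall-clock time. A minimal sketch of the arithmetic, with hypothetical values; the field and method names match the diff below:

#include <stdio.h>

int main(void) {
	/* Hypothetical chunk: starts 12.5 s into the source, 1024 frames at
	   44100 Hz, stretched by Rubber Band at a 1.25x tempo ratio. */
	double streamTimestamp = 12.5;        /* absolute position where the chunk begins */
	double duration = 1024.0 / 44100.0;   /* [chunk duration] in seconds */
	double streamTimeRatio = 1.25;        /* 1.0 everywhere except Rubber Band */

	/* durationRatioed: the chunk duration scaled by the DSP ratio; this is
	   what the play position advances by when the chunk is consumed. */
	double durationRatioed = duration * streamTimeRatio;

	/* The output keeps chunk start + durationRatioed and reports it through
	   setAmountPlayed: once the chunk has played out. */
	double amountPlayed = streamTimestamp + durationRatioed;
	printf("play position after this chunk: %.6f s\n", amountPlayed);
	return 0;
}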

View file

@@ -65,6 +65,8 @@ enum {
 AudioStreamBasicDescription format;
 NSMutableData *chunkData;
 uint32_t channelConfig;
+double streamTimestamp;
+double streamTimeRatio;
 BOOL formatAssigned;
 BOOL lossless;
 BOOL hdcd;
@@ -72,6 +74,8 @@ enum {
 @property AudioStreamBasicDescription format;
 @property uint32_t channelConfig;
+@property double streamTimestamp;
+@property double streamTimeRatio;
 @property BOOL lossless;
 + (uint32_t)guessChannelConfig:(uint32_t)channelCount;
@@ -94,6 +98,7 @@ enum {
 - (void)setFrameCount:(size_t)count; // For truncation only
 - (double)duration;
+- (double)durationRatioed;
 - (BOOL)isHDCD;
 - (void)setHDCD;

View file

@@ -19,6 +19,8 @@
 formatAssigned = NO;
 lossless = NO;
 hdcd = NO;
+streamTimestamp = 0.0;
+streamTimeRatio = 1.0;
 }
 return self;
@@ -31,6 +33,9 @@
 chunkData = [[NSMutableData alloc] init];
 [self setFormat:propertiesToASBD(properties)];
 lossless = [[properties objectForKey:@"encoding"] isEqualToString:@"lossless"];
+hdcd = NO;
+streamTimestamp = 0.0;
+streamTimeRatio = 1.0;
 }
 return self;
@@ -117,6 +122,8 @@ static const uint32_t AudioChannelConfigTable[] = {
 }
 @synthesize lossless;
+@synthesize streamTimestamp;
+@synthesize streamTimeRatio;
 - (AudioStreamBasicDescription)format {
 return format;
@@ -155,10 +162,12 @@ static const uint32_t AudioChannelConfigTable[] = {
 - (NSData *)removeSamples:(size_t)frameCount {
 if(formatAssigned) {
 @autoreleasepool {
+const double framesDuration = (double)(frameCount) / format.mSampleRate;
 const size_t bytesPerPacket = format.mBytesPerPacket;
 const size_t byteCount = bytesPerPacket * frameCount;
 NSData *ret = [chunkData subdataWithRange:NSMakeRange(0, byteCount)];
 [chunkData replaceBytesInRange:NSMakeRange(0, byteCount) withBytes:NULL length:0];
+streamTimestamp += framesDuration * streamTimeRatio;
 return ret;
 }
 }
@@ -196,6 +205,10 @@ static const uint32_t AudioChannelConfigTable[] = {
 return 0.0;
 }
+- (double)durationRatioed {
+return [self duration] * streamTimeRatio;
+}
 - (BOOL)isHDCD {
 return hdcd;
 }

View file

@@ -19,6 +19,7 @@ NS_ASSUME_NONNULL_BEGIN
 @interface ChunkList : NSObject {
 NSMutableArray<AudioChunk *> *chunkList;
 double listDuration;
+double listDurationRatioed;
 double maxDuration;
 BOOL inAdder;
@@ -53,6 +54,7 @@ NS_ASSUME_NONNULL_BEGIN
 }
 @property(readonly) double listDuration;
+@property(readonly) double listDurationRatioed;
 @property(readonly) double maxDuration;
 - (id)initWithMaximumDuration:(double)duration;
@@ -69,6 +71,8 @@ NS_ASSUME_NONNULL_BEGIN
 - (BOOL)peekFormat:(nonnull AudioStreamBasicDescription *)format channelConfig:(nonnull uint32_t *)config;
+- (BOOL)peekTimestamp:(nonnull double *)timestamp timeRatio:(nonnull double *)timeRatio;
 @end
 NS_ASSUME_NONNULL_END

View file

@@ -369,6 +369,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 @implementation ChunkList
 @synthesize listDuration;
+@synthesize listDurationRatioed;
 @synthesize maxDuration;
 - (id)initWithMaximumDuration:(double)duration {
@@ -377,6 +378,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 if(self) {
 chunkList = [[NSMutableArray alloc] init];
 listDuration = 0.0;
+listDurationRatioed = 0.0;
 maxDuration = duration;
 inAdder = NO;
@@ -463,10 +465,12 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 inAdder = YES;
 const double chunkDuration = [chunk duration];
+const double chunkDurationRatioed = [chunk durationRatioed];
 @synchronized(chunkList) {
 [chunkList addObject:chunk];
 listDuration += chunkDuration;
+listDurationRatioed += chunkDurationRatioed;
 }
 inAdder = NO;
@@ -487,6 +491,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 if([chunk frameCount] <= maxFrameCount) {
 [chunkList removeObjectAtIndex:0];
 listDuration -= [chunk duration];
+listDurationRatioed -= [chunk durationRatioed];
 inRemover = NO;
 return chunk;
 }
@@ -495,8 +500,11 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 [ret setFormat:[chunk format]];
 [ret setChannelConfig:[chunk channelConfig]];
 [ret setLossless:[chunk lossless]];
+[ret setStreamTimestamp:[chunk streamTimestamp]];
+[ret setStreamTimeRatio:[chunk streamTimeRatio]];
 [ret assignData:removedData];
 listDuration -= [ret duration];
+listDurationRatioed -= [ret durationRatioed];
 inRemover = NO;
 return ret;
 }
@@ -523,6 +531,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 if([chunk frameCount] <= maxFrameCount) {
 [chunkList removeObjectAtIndex:0];
 listDuration -= [chunk duration];
+listDurationRatioed -= [chunk durationRatioed];
 inRemover = NO;
 return [self convertChunk:chunk];
 }
@@ -531,8 +540,11 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 [ret setFormat:[chunk format]];
 [ret setChannelConfig:[chunk channelConfig]];
 [ret setLossless:[chunk lossless]];
+[ret setStreamTimestamp:[chunk streamTimestamp]];
+[ret setStreamTimeRatio:[chunk streamTimeRatio]];
 [ret assignData:removedData];
 listDuration -= [ret duration];
+listDurationRatioed -= [ret durationRatioed];
 inRemover = NO;
 return [self convertChunk:ret];
 }
@@ -607,6 +619,8 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 size_t bitsPerSample = inputFormat.mBitsPerChannel;
 BOOL isBigEndian = !!(inputFormat.mFormatFlags & kAudioFormatFlagIsBigEndian);
+double streamTimestamp = [inChunk streamTimestamp];
 NSData *inputData = [inChunk removeSamples:samplesRead];
 #if DSD_DECIMATE
@@ -772,6 +786,8 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 [outChunk setFormat:floatFormat];
 [outChunk setChannelConfig:inputChannelConfig];
 [outChunk setLossless:inputLossless];
+[outChunk setStreamTimestamp:streamTimestamp];
+[outChunk setStreamTimeRatio:[inChunk streamTimeRatio]];
 if(hdcdSustained) [outChunk setHDCD];
 [outChunk assignSamples:inputBuffer frameCount:bytesReadFromInput / floatFormat.mBytesPerPacket];
@@ -792,4 +808,19 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 return NO;
 }
+- (BOOL)peekTimestamp:(double *)timestamp timeRatio:(double *)timeRatio {
+if(stopping) return NO;
+@synchronized (chunkList) {
+if([chunkList count]) {
+AudioChunk *chunk = [chunkList objectAtIndex:0];
+*timestamp = [chunk streamTimestamp];
+*timeRatio = [chunk streamTimeRatio];
+return YES;
+}
+}
+*timestamp = 0.0;
+*timeRatio = 1.0;
+return NO;
+}
 @end

View file

@@ -25,6 +25,8 @@
 size_t inputBufferSize;
 size_t inpSize, inpOffset;
+double streamTimestamp, streamTimeRatio;
 BOOL stopping;
 BOOL convertEntered;
 BOOL paused;

View file

@@ -133,6 +133,12 @@ void scale_by_volume(float *buffer, size_t count, float volume) {
 return nil;
 }
+if(inpOffset == inpSize) {
+streamTimestamp = 0.0;
+streamTimeRatio = 1.0;
+[self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio];
+}
 while(inpOffset == inpSize) {
 // Approximately the most we want on input
 ioNumberPackets = 4096;
@@ -315,7 +321,10 @@ void scale_by_volume(float *buffer, size_t count, float volume) {
 [chunk setChannelConfig:nodeChannelConfig];
 }
 scale_by_volume(floatBuffer, ioNumberPackets / sizeof(float), volumeScale);
+[chunk setStreamTimestamp:streamTimestamp];
+[chunk setStreamTimeRatio:streamTimeRatio];
 [chunk assignSamples:floatBuffer frameCount:ioNumberPackets / floatFormat.mBytesPerPacket];
+streamTimestamp += [chunk durationRatioed];
 convertEntered = NO;
 return chunk;
 }

View file

@@ -354,6 +354,13 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 return nil;
 }
+double streamTimestamp;
+double streamTimeRatio;
+if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
+processEntered = NO;
+return nil;
+}
 if((enableEqualizer && !equalizerInitialized) ||
 memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 ||
 inputChannelConfig != lastInputChannelConfig) {
@@ -377,6 +384,8 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 samplePtr = &inBuffer[0];
 size_t channels = inputFormat.mChannelsPerFrame;
+BOOL isHDCD = NO;
 while(!stopping && totalFrameCount < 4096) {
 AudioStreamBasicDescription newInputFormat;
 uint32_t newChannelConfig;
@@ -391,6 +400,10 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 break;
 }
+if([chunk isHDCD]) {
+isHDCD = YES;
+}
 size_t frameCount = [chunk frameCount];
 NSData *sampleData = [chunk removeSamples:frameCount];
@@ -437,6 +450,9 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 if(outputChannelConfig) {
 [outputChunk setChannelConfig:inputChannelConfig];
 }
+if(isHDCD) [outputChunk setHDCD];
+[outputChunk setStreamTimestamp:streamTimestamp];
+[outputChunk setStreamTimeRatio:streamTimeRatio];
 [outputChunk assignSamples:&outBuffer[0] frameCount:totalFrameCount];
 }

View file

@@ -173,6 +173,13 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 return nil;
 }
+double streamTimestamp;
+double streamTimeRatio;
+if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
+processEntered = NO;
+return nil;
+}
 if((enableFSurround && !fsurround) ||
 memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 ||
 inputChannelConfig != lastInputChannelConfig) {
@@ -197,6 +204,8 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 float *samplePtr = resetStreamFormat ? &inBuffer[2048 * 2] : &inBuffer[0];
+BOOL isHDCD = NO;
 while(!stopping && totalFrameCount < totalRequestedSamples) {
 AudioStreamBasicDescription newInputFormat;
 uint32_t newChannelConfig;
@@ -211,6 +220,10 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 break;
 }
+if([chunk isHDCD]) {
+isHDCD = YES;
+}
 size_t frameCount = [chunk frameCount];
 NSData *sampleData = [chunk removeSamples:frameCount];
@@ -262,6 +275,9 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 if(outputChannelConfig) {
 [outputChunk setChannelConfig:outputChannelConfig];
 }
+if(isHDCD) [outputChunk setHDCD];
+[outputChunk setStreamTimestamp:streamTimestamp];
+[outputChunk setStreamTimeRatio:streamTimeRatio];
 [outputChunk assignSamples:samplePtr frameCount:samplesRendered];
 }

View file

@@ -336,6 +336,8 @@ static void unregisterMotionListener(void) {
 [hrtf reloadWithMatrix:matrix];
 }
+double streamTimestamp = [chunk streamTimestamp];
 size_t frameCount = [chunk frameCount];
 NSData *sampleData = [chunk removeSamples:frameCount];
@@ -346,6 +348,9 @@ static void unregisterMotionListener(void) {
 if(outputChannelConfig) {
 [outputChunk setChannelConfig:outputChannelConfig];
 }
+if([chunk isHDCD]) [outputChunk setHDCD];
+[outputChunk setStreamTimestamp:streamTimestamp];
+[outputChunk setStreamTimeRatio:[chunk streamTimeRatio]];
 [outputChunk assignSamples:&outBuffer[0] frameCount:frameCount];
 processEntered = NO;

View file

@@ -400,6 +400,8 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
 size_t frameCount = [chunk frameCount];
+double streamTimestamp = [chunk streamTimestamp];
 int len = (int)frameCount;
 int channels = (int)(inputFormat.mChannelsPerFrame);
 NSData *samples = [chunk removeSamples:frameCount];
@@ -459,6 +461,9 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
 if(inputChannelConfig) {
 [outputChunk setChannelConfig:inputChannelConfig];
 }
+if([chunk isHDCD]) [outputChunk setHDCD];
+[outputChunk setStreamTimestamp:streamTimestamp];
+[outputChunk setStreamTimeRatio:[chunk streamTimeRatio] * tempo];
 [outputChunk assignSamples:rsOutBuffer frameCount:samplesBuffered];
 samplesBuffered = 0;
 stretchOut += [outputChunk duration];

View file

@@ -44,6 +44,7 @@
 - (AudioChunk *_Nonnull)readChunkAsFloat32:(size_t)maxFrames;
 - (BOOL)peekFormat:(AudioStreamBasicDescription *_Nonnull)format channelConfig:(uint32_t *_Nonnull)config;
+- (BOOL)peekTimestamp:(double *_Nonnull)timestamp timeRatio:(double *_Nonnull)timeRatio;
 - (void)process; // Should be overwriten by subclass
 - (void)threadEntry:(id _Nullable)arg;

View file

@@ -153,6 +153,22 @@
 return ret;
 }
+- (BOOL)peekTimestamp:(double *_Nonnull)timestamp timeRatio:(double *_Nonnull)timeRatio {
+[accessLock lock];
+if([[previousNode buffer] isEmpty] && [previousNode endOfStream] == YES) {
+endOfStream = YES;
+[accessLock unlock];
+return NO;
+}
+BOOL ret = [[previousNode buffer] peekTimestamp:timestamp timeRatio:timeRatio];
+[accessLock unlock];
+return ret;
+}
 - (AudioChunk *)readChunk:(size_t)maxFrames {
 [accessLock lock];
View file

@@ -32,6 +32,7 @@
 - (double)amountPlayedInterval;
 - (void)incrementAmountPlayed:(double)seconds;
+- (void)setAmountPlayed:(double)seconds;
 - (void)resetAmountPlayed;
 - (void)resetAmountPlayedInterval;

View file

@@ -59,6 +59,15 @@
 }
 }
+- (void)setAmountPlayed:(double)seconds {
+double delta = seconds - amountPlayed;
+if(delta > 0.0 && delta < 5.0) {
+[self incrementAmountPlayed:delta];
+} else {
+amountPlayed = seconds;
+}
+}
 - (void)resetAmountPlayed {
 amountPlayed = 0;
 }
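The setAmountPlayed: method added above treats small forward deltas as continuous playback and anything else as a seek. A standalone sketch of that behavior with hypothetical values, for illustration only:

#include <stdio.h>

static double amountPlayed = 0.0;

/* Mirrors the logic added above: deltas between 0 and 5 seconds are folded
   into the running play time, larger or backward jumps reposition outright. */
static void setAmountPlayed(double seconds) {
	double delta = seconds - amountPlayed;
	if(delta > 0.0 && delta < 5.0) {
		amountPlayed += delta;      /* equivalent to incrementAmountPlayed: */
	} else {
		amountPlayed = seconds;     /* treat as a seek or track change */
	}
}

int main(void) {
	setAmountPlayed(0.1);           /* normal progress */
	setAmountPlayed(0.2);
	setAmountPlayed(125.0);         /* a large jump repositions directly */
	printf("%.1f\n", amountPlayed); /* prints 125.0 */
	return 0;
}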

View file

@@ -50,7 +50,7 @@ using std::atomic_long;
 double secondsLatency;
 double visPushed;
-double tempo;
+double streamTimestamp;
 double lastClippedSampleRate;

View file

@@ -86,6 +86,8 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;
 config = [chunk channelConfig];
 double chunkDuration = 0;
+streamTimestamp = [chunk streamTimestamp] + [chunk durationRatioed];
 if(frameCount) {
 chunkDuration = [chunk duration];
@@ -211,8 +213,6 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;
 secondsHdcdSustained = 0;
-tempo = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] doubleForKey:@"tempo"];
 outputLock = [[NSLock alloc] init];
 #ifdef OUTPUT_LOG
@@ -257,11 +257,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 NSDictionary *device = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"outputDevice"];
 [self setOutputDeviceWithDeviceDict:device];
-} else if([keyPath isEqualToString:@"values.eqPreamp"]) {
-float preamp = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] floatForKey:@"eqPreamp"];
-eqPreamp = pow(10.0, preamp / 20.0);
-} else if([keyPath isEqualToString:@"values.tempo"]) {
-tempo = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] doubleForKey:@"tempo"];
 }
 }
@@ -846,8 +841,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 visController = [VisualizationController sharedController];
 [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.outputDevice" options:0 context:kOutputCoreAudioContext];
-[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.eqPreamp" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kOutputCoreAudioContext];
-[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.tempo" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kOutputCoreAudioContext];
 observersapplied = YES;
@@ -857,7 +850,7 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 - (void)updateLatency:(double)secondsPlayed {
 if(secondsPlayed > 0) {
-[outputController incrementAmountPlayed:secondsPlayed * tempo];
+[outputController setAmountPlayed:streamTimestamp];
 }
 double visLatency = visPushed;
 visPushed -= secondsPlayed;
@@ -895,7 +888,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 stopInvoked = YES;
 if(observersapplied) {
 [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.outputDevice" context:kOutputCoreAudioContext];
-[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.tempo" context:kOutputCoreAudioContext];
 observersapplied = NO;
 }
 stopping = YES;

View file

@@ -135,6 +135,9 @@ static CAdPlugDatabase *g_database = NULL;
 total += samples_now;
 }
+double streamTimestamp = (double)(current_pos) / sampleRate;
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:total];
 return chunk;

View file

@@ -46,6 +46,7 @@
 uint32_t channelConfig;
 float frequency;
 long totalFrames;
+long frame;
 NSString* codec;
 }

View file

@@ -150,6 +150,8 @@ static SInt64 getSizeProc(void *clientData) {
 _in_opened = YES;
+frame = 0;
 return [self readInfoFromExtAudioFileRef];
 }
@@ -330,6 +332,10 @@ static SInt64 getSizeProc(void *clientData) {
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+double streamTimestamp = (double)(frame) / frequency;
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:frameCount];
 return chunk;
@@ -343,6 +349,8 @@ static SInt64 getSizeProc(void *clientData) {
 return -1;
 }
+self->frame = frame;
 return frame;
 }

View file

@@ -361,6 +361,9 @@ static void *kCueSheetDecoderContext = &kCueSheetDecoderContext;
 [chunk setFrameCount:frames / frameScale];
 }
+double streamTimestamp = (double)(framePosition - trackStart) / [chunk format].mSampleRate;
+[chunk setStreamTimestamp:streamTimestamp];
 framePosition += chunk.frameCount * frameScale;
 return chunk;

View file

@@ -921,6 +921,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
 if(totalFrames && (framesRead + framesReadNow > totalFrames))
 framesReadNow = (int)(totalFrames - framesRead);
+double streamTimestamp = (double)(framesRead) / frequency;
 framesRead += framesReadNow;
 metadataUpdateCount += framesReadNow;
@@ -931,6 +933,7 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:framesRead];
 [chunk assignSamples:sampleBuffer frameCount:framesReadNow];
 return chunk;

View file

@@ -30,6 +30,8 @@
 uint32_t channelConfig;
 float frequency;
 long totalFrames;
+long frame;
+double seconds;
 long fileSize;

View file

@@ -372,6 +372,9 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
 blockBuffer = malloc(SAMPLE_blockBuffer_SIZE);
+frame = 0;
+seconds = 0.0;
 return YES;
 }
@@ -391,8 +394,14 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
 if(blockBufferFrames > 0) {
 chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+frame += blockBufferFrames;
+[chunk setStreamTimestamp:seconds];
 [chunk assignSamples:blockBuffer frameCount:blockBufferFrames];
+seconds += [chunk duration];
 blockBufferFrames = 0;
 }
@@ -453,6 +462,9 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
 if(!FLAC__stream_decoder_seek_absolute(decoder, sample))
 return -1;
+frame = sample;
+seconds = (double)(sample) / frequency;
 return sample;
 }

View file

@@ -192,6 +192,8 @@ gme_err_t readCallback(void *data, void *out, int count) {
 else
 gme_set_fade(emu, (int)(length - fade), (int)fade);
+double streamTimestamp = (double)(gme_tell(emu)) * 0.001;
 gme_play(emu, numSamples, (short int *)buf);
 // Some formats support length, but we'll add that in the future.
@@ -199,6 +201,8 @@ gme_err_t readCallback(void *data, void *out, int count) {
 // GME will always generate samples. There's no real EOS.
 // Addendum: The above gme_track_ended() call has been in place for years now
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:sampleBuffer frameCount:frames];
 return chunk;

View file

@@ -1362,10 +1362,13 @@ static int usf_info(void *context, const char *name, const char *value) {
 }
 }
+double streamTimestamp = (double)(framesRead) / (double)(sampleRate);
 framesRead += written;
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:written];
 return chunk;

View file

@@ -158,10 +158,13 @@ static void oneTimeInit(void) {
 total = (int)(fadePos - fadeStart);
 }
+double streamTimestamp = (double)(framesRead) / sampleRate;
 framesRead += total;
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:sampleBuffer frameCount:total];
 return chunk;

View file

@@ -25,6 +25,8 @@
 long _currentOutputFrames;
 long _fileSize;
+double seconds;
 id<CogSource> _source;
 BOOL _firstFrame;

View file

@@ -476,6 +476,8 @@ error:
 _endPadding = 0;
 // DLog(@"OPEN: %i", _firstFrame);
+seconds = 0.0;
 inputEOF = NO;
 genre = @"";
@@ -748,7 +750,9 @@ error:
 if(framesToCopy) {
 chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:seconds];
 [chunk assignSamples:_outputBuffer frameCount:framesToCopy];
+seconds += [chunk duration];
 _outputFrames = 0;
 break;
 }
@@ -812,6 +816,7 @@ error:
 if(frame < _framesDecoded) {
 _framesDecoded = 0;
+seconds = 0.0;
 _firstFrame = YES;
 if(_foundLAMEHeader || _foundiTunSMPB)
 framesToSkip = _startPadding;
@@ -821,6 +826,7 @@ error:
 }
 framesToSkip += frame - _framesDecoded;
+seconds += (double)(frame - _framesDecoded) / sampleRate;
 return frame;
 }

View file

@@ -300,6 +300,8 @@ static OSType getOSType(const char *in_) {
 return nil;
 }
+double streamTimestamp = 0.0;
 try {
 player->setLoopMode((repeatone || isLooped) ? (MIDIPlayer::loop_mode_enable | MIDIPlayer::loop_mode_force) : 0);
@@ -317,6 +319,8 @@ static OSType getOSType(const char *in_) {
 soundFontsAssigned = YES;
 }
+streamTimestamp = (double)(player->Tell()) / sampleRate;
 int frames = 1024;
 float buffer[frames * 2];
@@ -358,6 +362,7 @@ static OSType getOSType(const char *in_) {
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:frames];
 return chunk;

View file

@@ -589,3 +589,7 @@ void MIDIPlayer::send_sysex_time_filtered(const uint8_t *data, size_t size, size
 bool MIDIPlayer::GetLastError(std::string &p_out) {
 return get_last_error(p_out);
 }
+unsigned long MIDIPlayer::Tell() const {
+return uTimeCurrent;
+}

View file

@@ -35,6 +35,7 @@ class MIDIPlayer {
 bool Load(const midi_container& midi_file, unsigned subsong, unsigned loop_mode, unsigned clean_flags);
 unsigned long Play(float* out, unsigned long count);
 void Seek(unsigned long sample);
+unsigned long Tell() const;
 bool GetLastError(std::string& p_out);

View file

@@ -27,6 +27,7 @@
 int bitrate;
 float frequency;
 long totalFrames;
+long frame;
 }
 - (BOOL)writeToBuffer:(float *)sample_buffer fromBuffer:(const MPC_SAMPLE_FORMAT *)p_buffer frames:(unsigned)frames;

View file

@@ -79,6 +79,8 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) {
 totalFrames = mpc_streaminfo_get_length_samples(&info);
+frame = 0;
 [self willChangeValueForKey:@"properties"];
 [self didChangeValueForKey:@"properties"];
@@ -151,8 +153,12 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) {
 }
 }
+double streamTimestamp = (double)(frame) / frequency;
+frame += framesRead;
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:floatBuffer frameCount:framesRead];
 return chunk;
@@ -172,6 +178,8 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) {
 - (long)seek:(long)sample {
 mpc_demux_seek_sample(demux, sample);
+frame = sample;
 return sample;
 }

View file

@@ -120,6 +120,8 @@ static void g_push_archive_extensions(std::vector<std::string> &list) {
 try {
 mod->set_repeat_count(IsRepeatOneSet() ? -1 : 0);
+double streamTimestamp = mod->get_position_seconds();
 int frames = 1024;
 float buffer[frames * 2];
 void *buf = (void *)buffer;
@@ -142,6 +144,7 @@ static void g_push_archive_extensions(std::vector<std::string> &list) {
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:total];
 return chunk;

View file

@@ -22,6 +22,7 @@
 int bitrate;
 int channels;
 long totalFrames;
+long frame;
 int metadataUpdateInterval;
 int metadataUpdateCount;

View file

@@ -107,6 +107,7 @@ opus_int64 sourceTell(void *_stream) {
 seekable = op_seekable(opusRef);
 totalFrames = op_pcm_total(opusRef, -1);
+frame = 0;
 const OpusHead *head = op_head(opusRef, -1);
 const OpusTags *tags = op_tags(opusRef, -1);
@@ -289,8 +290,12 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
 [self updateIcyMetadata];
 }
+double streamTimestamp = (double)(frame) / 48000.0;
+frame += total / channels;
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:total / channels];
 return chunk;
@@ -308,6 +313,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
 - (long)seek:(long)frame {
 op_pcm_seek(opusRef, frame);
+self->frame = frame;
 return frame;
 }

View file

@@ -397,7 +397,7 @@ namespace Organya {
 }
 - (AudioChunk *)readAudio {
-int total = 0;
+double streamTimestamp = (double)(m_song->cur_beat) * (double)(m_song->ms_per_beat) * 0.001;
 std::vector<float> samples = m_song->Synth(sampleRate);
@ -424,6 +424,8 @@ namespace Organya {
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:streamTimestamp];
 if(samplesDiscard) {
 [chunk assignSamples:&samples[samplesDiscard * 2] frameCount:rendered - samplesDiscard];
 samplesDiscard = 0;

View file

@@ -22,6 +22,8 @@
 float frequency;
 long totalFrames;
 BOOL seekable;
+double seconds;
 }
 @end

View file

@@ -33,6 +33,8 @@
 totalFrames = (decoder->shn_get_song_length() * frequency) / 1000.0;
+seconds = 0.0;
 decoder->go();
 [self willChangeValueForKey:@"properties"];
@@ -57,8 +59,12 @@
 amountRead = decoder->read(buf, frames * bytesPerFrame);
 } while(amountRead == -1);
+[chunk setStreamTimestamp:seconds];
 [chunk assignSamples:buf frameCount:amountRead / bytesPerFrame];
+seconds += [chunk duration];
 return chunk;
 }
@@ -66,6 +72,7 @@
 unsigned int sec = sample / frequency;
 decoder->seek(sec);
+seconds = sec;
 return sample;
 }

View file

@@ -16,6 +16,8 @@
 long length;
 long remain;
+double seconds;
 float *buffer;
 }

View file

@@ -27,6 +27,7 @@ enum { channels = 2 };
 length = seconds * sample_rate;
 remain = length;
+seconds = 0.0;
 buffer = (float *) calloc(sizeof(float), 1024 * channels);
 if(!buffer) {
@@ -68,8 +69,11 @@ enum { channels = 2 };
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+[chunk setStreamTimestamp:seconds];
 [chunk assignSamples:buffer frameCount:frames];
+seconds += [chunk duration];
 return chunk;
 }
@@ -79,6 +83,8 @@ enum { channels = 2 };
 remain = length - frame;
+seconds = (double)(frame) / sample_rate;
 return frame;
 }

View file

@@ -31,6 +31,7 @@
 int channels;
 float frequency;
 long totalFrames;
+long frame;
 int metadataUpdateInterval;
 int metadataUpdateCount;

View file

@@ -99,6 +99,7 @@ long sourceTell(void *datasource) {
 seekable = ov_seekable(&vorbisRef);
 totalFrames = ov_pcm_total(&vorbisRef, -1);
+frame = 0;
 [self willChangeValueForKey:@"properties"];
 [self didChangeValueForKey:@"properties"];
@@ -221,6 +222,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
 int total = 0;
 int frames = 1024;
+double streamTimestamp = (double)(frame) / frequency;
 if(currentSection != lastSection) {
 vorbis_info *vi;
 vi = ov_info(&vorbisRef, -1);
@@ -277,6 +280,7 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
 [self updateIcyMetadata];
 }
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:total];
 return chunk;
@@ -293,6 +297,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
 - (long)seek:(long)frame {
 ov_pcm_seek(&vorbisRef, frame);
+self->frame = frame;
 return frame;
 }

View file

@@ -44,6 +44,7 @@
 int bitrate;
 float frequency;
 long totalFrames;
+long frame;
 }
 @end

View file

@@ -150,6 +150,7 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) {
 frequency = WavpackGetSampleRate(wpc);
 totalFrames = WavpackGetNumSamples(wpc);
+frame = 0;
 isDSD = NO;
@@ -257,6 +258,10 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) {
 ALog(@"Unsupported sample size: %d", bitsPerSample);
 }
+double streamTimestamp = (double)(frame) / frequency;
+frame += samplesRead;
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:samplesRead];
 return chunk;
@@ -270,6 +275,8 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) {
 }
 WavpackSeekSample(wpc, trueFrame);
+self->frame = frame;
 return frame;
 }

View file

@@ -231,6 +231,8 @@ const int masterVol = 0x10000; // Fixed point 16.16
 mainPlr->SetLoopCount(vgmplay->GetModifiedLoopCount(maxLoops));
 }
+double streamTimestamp = mainPlr->GetCurTime(0);
 UInt32 framesDone = 0;
 while(framesDone < frames) {
@@ -247,6 +249,7 @@ const int masterVol = 0x10000; // Fixed point 16.16
 framesDone += framesToDo;
 }
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:framesDone];
 return chunk;

View file

@@ -307,6 +307,8 @@ static void sidTuneLoader(const char *fileName, std::vector<uint8_t> &bufferRef)
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
+double streamTimestamp = (double)(renderedTotal) / sampleRate;
 int16_t buffer[1024 * n_channels];
 int framesToRender = 1024;
@@ -353,6 +355,8 @@ static void sidTuneLoader(const char *fileName, std::vector<uint8_t> &bufferRef)
 fadeRemain = fadeEnd;
 }
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:buffer frameCount:rendered];
 return chunk;

View file

@@ -318,6 +318,8 @@ static NSString *get_description_tag(const char *description, const char *tag, c
 UInt32 framesMax = frames;
 UInt32 framesDone = 0;
+double streamTimestamp = (double)(stream->pstate.play_position) / sampleRate;
 id audioChunkClass = NSClassFromString(@"AudioChunk");
 AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
@@ -374,6 +376,7 @@ static NSString *get_description_tag(const char *description, const char *tag, c
 frames -= frames_done;
 }
+[chunk setStreamTimestamp:streamTimestamp];
 [chunk assignSamples:sample_buf frameCount:framesDone];
 return chunk;