Audio: Add full timestamp accounting to playback

Audio Chunks now have full timestamp accounting, including DSP playback
speed ratio for the one DSP that can change play ratio, Rubber Band.
Inputs which support looping, and which can report the absolute play
position, now report that position.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
This commit is contained in:
Christopher Snowhill 2025-02-12 04:41:11 -08:00
parent 1e1ee2fbe2
commit 7994929a80
48 changed files with 269 additions and 20 deletions

View file

@ -65,6 +65,8 @@ enum {
AudioStreamBasicDescription format;
NSMutableData *chunkData;
uint32_t channelConfig;
double streamTimestamp;
double streamTimeRatio;
BOOL formatAssigned;
BOOL lossless;
BOOL hdcd;
@ -72,6 +74,8 @@ enum {
@property AudioStreamBasicDescription format;
@property uint32_t channelConfig;
@property double streamTimestamp;
@property double streamTimeRatio;
@property BOOL lossless;
+ (uint32_t)guessChannelConfig:(uint32_t)channelCount;
@ -94,6 +98,7 @@ enum {
- (void)setFrameCount:(size_t)count; // For truncation only
- (double)duration;
- (double)durationRatioed;
- (BOOL)isHDCD;
- (void)setHDCD;

View file

@ -19,6 +19,8 @@
formatAssigned = NO;
lossless = NO;
hdcd = NO;
streamTimestamp = 0.0;
streamTimeRatio = 1.0;
}
return self;
@ -31,6 +33,9 @@
chunkData = [[NSMutableData alloc] init];
[self setFormat:propertiesToASBD(properties)];
lossless = [[properties objectForKey:@"encoding"] isEqualToString:@"lossless"];
hdcd = NO;
streamTimestamp = 0.0;
streamTimeRatio = 1.0;
}
return self;
@ -117,6 +122,8 @@ static const uint32_t AudioChannelConfigTable[] = {
}
@synthesize lossless;
@synthesize streamTimestamp;
@synthesize streamTimeRatio;
- (AudioStreamBasicDescription)format {
return format;
@ -155,10 +162,12 @@ static const uint32_t AudioChannelConfigTable[] = {
- (NSData *)removeSamples:(size_t)frameCount {
if(formatAssigned) {
@autoreleasepool {
const double framesDuration = (double)(frameCount) / format.mSampleRate;
const size_t bytesPerPacket = format.mBytesPerPacket;
const size_t byteCount = bytesPerPacket * frameCount;
NSData *ret = [chunkData subdataWithRange:NSMakeRange(0, byteCount)];
[chunkData replaceBytesInRange:NSMakeRange(0, byteCount) withBytes:NULL length:0];
streamTimestamp += framesDuration * streamTimeRatio;
return ret;
}
}
@ -196,6 +205,10 @@ static const uint32_t AudioChannelConfigTable[] = {
return 0.0;
}
- (double)durationRatioed {
	// The chunk's wall-clock duration scaled by the stream's playback
	// speed ratio (e.g. a tempo-changing DSP such as Rubber Band).
	const double ratio = streamTimeRatio;
	return ratio * [self duration];
}
// Returns whether this chunk has been flagged as HDCD content (set via -setHDCD).
- (BOOL)isHDCD {
return hdcd;
}

View file

@ -19,6 +19,7 @@ NS_ASSUME_NONNULL_BEGIN
@interface ChunkList : NSObject {
NSMutableArray<AudioChunk *> *chunkList;
double listDuration;
double listDurationRatioed;
double maxDuration;
BOOL inAdder;
@ -53,6 +54,7 @@ NS_ASSUME_NONNULL_BEGIN
}
@property(readonly) double listDuration;
@property(readonly) double listDurationRatioed;
@property(readonly) double maxDuration;
- (id)initWithMaximumDuration:(double)duration;
@ -69,6 +71,8 @@ NS_ASSUME_NONNULL_BEGIN
- (BOOL)peekFormat:(nonnull AudioStreamBasicDescription *)format channelConfig:(nonnull uint32_t *)config;
- (BOOL)peekTimestamp:(nonnull double *)timestamp timeRatio:(nonnull double *)timeRatio;
@end
NS_ASSUME_NONNULL_END

View file

@ -369,6 +369,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
@implementation ChunkList
@synthesize listDuration;
@synthesize listDurationRatioed;
@synthesize maxDuration;
- (id)initWithMaximumDuration:(double)duration {
@ -377,6 +378,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
if(self) {
chunkList = [[NSMutableArray alloc] init];
listDuration = 0.0;
listDurationRatioed = 0.0;
maxDuration = duration;
inAdder = NO;
@ -394,9 +396,9 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
dsd2pcmCount = 0;
dsd2pcmLatency = 0;
#endif
halveDSDVolume = NO;
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.halveDSDVolume" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kChunkListContext];
}
@ -463,10 +465,12 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
inAdder = YES;
const double chunkDuration = [chunk duration];
const double chunkDurationRatioed = [chunk durationRatioed];
@synchronized(chunkList) {
[chunkList addObject:chunk];
listDuration += chunkDuration;
listDurationRatioed += chunkDurationRatioed;
}
inAdder = NO;
@ -487,6 +491,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
if([chunk frameCount] <= maxFrameCount) {
[chunkList removeObjectAtIndex:0];
listDuration -= [chunk duration];
listDurationRatioed -= [chunk durationRatioed];
inRemover = NO;
return chunk;
}
@ -495,8 +500,11 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
[ret setFormat:[chunk format]];
[ret setChannelConfig:[chunk channelConfig]];
[ret setLossless:[chunk lossless]];
[ret setStreamTimestamp:[chunk streamTimestamp]];
[ret setStreamTimeRatio:[chunk streamTimeRatio]];
[ret assignData:removedData];
listDuration -= [ret duration];
listDurationRatioed -= [ret durationRatioed];
inRemover = NO;
return ret;
}
@ -523,6 +531,7 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
if([chunk frameCount] <= maxFrameCount) {
[chunkList removeObjectAtIndex:0];
listDuration -= [chunk duration];
listDurationRatioed -= [chunk durationRatioed];
inRemover = NO;
return [self convertChunk:chunk];
}
@ -531,8 +540,11 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
[ret setFormat:[chunk format]];
[ret setChannelConfig:[chunk channelConfig]];
[ret setLossless:[chunk lossless]];
[ret setStreamTimestamp:[chunk streamTimestamp]];
[ret setStreamTimeRatio:[chunk streamTimeRatio]];
[ret assignData:removedData];
listDuration -= [ret duration];
listDurationRatioed -= [ret durationRatioed];
inRemover = NO;
return [self convertChunk:ret];
}
@ -607,6 +619,8 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
size_t bitsPerSample = inputFormat.mBitsPerChannel;
BOOL isBigEndian = !!(inputFormat.mFormatFlags & kAudioFormatFlagIsBigEndian);
double streamTimestamp = [inChunk streamTimestamp];
NSData *inputData = [inChunk removeSamples:samplesRead];
#if DSD_DECIMATE
@ -772,6 +786,8 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
[outChunk setFormat:floatFormat];
[outChunk setChannelConfig:inputChannelConfig];
[outChunk setLossless:inputLossless];
[outChunk setStreamTimestamp:streamTimestamp];
[outChunk setStreamTimeRatio:[inChunk streamTimeRatio]];
if(hdcdSustained) [outChunk setHDCD];
[outChunk assignSamples:inputBuffer frameCount:bytesReadFromInput / floatFormat.mBytesPerPacket];
@ -792,4 +808,19 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
return NO;
}
// Reports the stream timestamp and time ratio of the next chunk in the
// list without consuming it.
// @param timestamp Receives the next chunk's absolute stream timestamp,
//        or 0.0 when no chunk is available.
// @param timeRatio Receives the next chunk's playback speed ratio,
//        or 1.0 when no chunk is available.
// @return YES if a chunk was available to peek, NO otherwise.
- (BOOL)peekTimestamp:(double *)timestamp timeRatio:(double *)timeRatio {
	// Assign defaults up front so the out-parameters are always defined,
	// including on the early `stopping` return below (previously they were
	// left uninitialized on that path).
	*timestamp = 0.0;
	*timeRatio = 1.0;
	if(stopping) return NO;
	@synchronized(chunkList) {
		if([chunkList count]) {
			AudioChunk *chunk = [chunkList objectAtIndex:0];
			*timestamp = [chunk streamTimestamp];
			*timeRatio = [chunk streamTimeRatio];
			return YES;
		}
	}
	return NO;
}
@end

View file

@ -25,6 +25,8 @@
size_t inputBufferSize;
size_t inpSize, inpOffset;
double streamTimestamp, streamTimeRatio;
BOOL stopping;
BOOL convertEntered;
BOOL paused;

View file

@ -133,6 +133,12 @@ void scale_by_volume(float *buffer, size_t count, float volume) {
return nil;
}
if(inpOffset == inpSize) {
streamTimestamp = 0.0;
streamTimeRatio = 1.0;
[self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio];
}
while(inpOffset == inpSize) {
// Approximately the most we want on input
ioNumberPackets = 4096;
@ -315,7 +321,10 @@ void scale_by_volume(float *buffer, size_t count, float volume) {
[chunk setChannelConfig:nodeChannelConfig];
}
scale_by_volume(floatBuffer, ioNumberPackets / sizeof(float), volumeScale);
[chunk setStreamTimestamp:streamTimestamp];
[chunk setStreamTimeRatio:streamTimeRatio];
[chunk assignSamples:floatBuffer frameCount:ioNumberPackets / floatFormat.mBytesPerPacket];
streamTimestamp += [chunk durationRatioed];
convertEntered = NO;
return chunk;
}

View file

@ -354,6 +354,13 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
return nil;
}
double streamTimestamp;
double streamTimeRatio;
if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
processEntered = NO;
return nil;
}
if((enableEqualizer && !equalizerInitialized) ||
memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 ||
inputChannelConfig != lastInputChannelConfig) {
@ -376,7 +383,9 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
samplePtr = &inBuffer[0];
size_t channels = inputFormat.mChannelsPerFrame;
BOOL isHDCD = NO;
while(!stopping && totalFrameCount < 4096) {
AudioStreamBasicDescription newInputFormat;
uint32_t newChannelConfig;
@ -391,6 +400,10 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
break;
}
if([chunk isHDCD]) {
isHDCD = YES;
}
size_t frameCount = [chunk frameCount];
NSData *sampleData = [chunk removeSamples:frameCount];
@ -437,6 +450,9 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
if(outputChannelConfig) {
[outputChunk setChannelConfig:inputChannelConfig];
}
if(isHDCD) [outputChunk setHDCD];
[outputChunk setStreamTimestamp:streamTimestamp];
[outputChunk setStreamTimeRatio:streamTimeRatio];
[outputChunk assignSamples:&outBuffer[0] frameCount:totalFrameCount];
}

View file

@ -173,6 +173,13 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
return nil;
}
double streamTimestamp;
double streamTimeRatio;
if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
processEntered = NO;
return nil;
}
if((enableFSurround && !fsurround) ||
memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 ||
inputChannelConfig != lastInputChannelConfig) {
@ -197,6 +204,8 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
float *samplePtr = resetStreamFormat ? &inBuffer[2048 * 2] : &inBuffer[0];
BOOL isHDCD = NO;
while(!stopping && totalFrameCount < totalRequestedSamples) {
AudioStreamBasicDescription newInputFormat;
uint32_t newChannelConfig;
@ -211,6 +220,10 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
break;
}
if([chunk isHDCD]) {
isHDCD = YES;
}
size_t frameCount = [chunk frameCount];
NSData *sampleData = [chunk removeSamples:frameCount];
@ -262,6 +275,9 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
if(outputChannelConfig) {
[outputChunk setChannelConfig:outputChannelConfig];
}
if(isHDCD) [outputChunk setHDCD];
[outputChunk setStreamTimestamp:streamTimestamp];
[outputChunk setStreamTimeRatio:streamTimeRatio];
[outputChunk assignSamples:samplePtr frameCount:samplesRendered];
}

View file

@ -336,6 +336,8 @@ static void unregisterMotionListener(void) {
[hrtf reloadWithMatrix:matrix];
}
double streamTimestamp = [chunk streamTimestamp];
size_t frameCount = [chunk frameCount];
NSData *sampleData = [chunk removeSamples:frameCount];
@ -346,6 +348,9 @@ static void unregisterMotionListener(void) {
if(outputChannelConfig) {
[outputChunk setChannelConfig:outputChannelConfig];
}
if([chunk isHDCD]) [outputChunk setHDCD];
[outputChunk setStreamTimestamp:streamTimestamp];
[outputChunk setStreamTimeRatio:[chunk streamTimeRatio]];
[outputChunk assignSamples:&outBuffer[0] frameCount:frameCount];
processEntered = NO;

View file

@ -400,6 +400,8 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
size_t frameCount = [chunk frameCount];
double streamTimestamp = [chunk streamTimestamp];
int len = (int)frameCount;
int channels = (int)(inputFormat.mChannelsPerFrame);
NSData *samples = [chunk removeSamples:frameCount];
@ -459,6 +461,9 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
if(inputChannelConfig) {
[outputChunk setChannelConfig:inputChannelConfig];
}
if([chunk isHDCD]) [outputChunk setHDCD];
[outputChunk setStreamTimestamp:streamTimestamp];
[outputChunk setStreamTimeRatio:[chunk streamTimeRatio] * tempo];
[outputChunk assignSamples:rsOutBuffer frameCount:samplesBuffered];
samplesBuffered = 0;
stretchOut += [outputChunk duration];

View file

@ -44,6 +44,7 @@
- (AudioChunk *_Nonnull)readChunkAsFloat32:(size_t)maxFrames;
- (BOOL)peekFormat:(AudioStreamBasicDescription *_Nonnull)format channelConfig:(uint32_t *_Nonnull)config;
- (BOOL)peekTimestamp:(double *_Nonnull)timestamp timeRatio:(double *_Nonnull)timeRatio;
- (void)process; // Should be overwriten by subclass
- (void)threadEntry:(id _Nullable)arg;

View file

@ -153,6 +153,22 @@
return ret;
}
// Peeks the timestamp and time ratio of the next chunk in the previous
// node's buffer, without consuming it.
// @param timestamp Receives the next chunk's stream timestamp, or 0.0 on failure.
// @param timeRatio Receives the next chunk's playback speed ratio, or 1.0 on failure.
// @return YES if a chunk was available, NO at end of stream or when empty.
- (BOOL)peekTimestamp:(double *_Nonnull)timestamp timeRatio:(double *_Nonnull)timeRatio {
	// Provide defined defaults on every path, matching the fallback values
	// ChunkList uses; previously the end-of-stream return left the
	// out-parameters uninitialized.
	*timestamp = 0.0;
	*timeRatio = 1.0;
	[accessLock lock];
	if([[previousNode buffer] isEmpty] && [previousNode endOfStream]) {
		endOfStream = YES;
		[accessLock unlock];
		return NO;
	}
	BOOL ret = [[previousNode buffer] peekTimestamp:timestamp timeRatio:timeRatio];
	[accessLock unlock];
	return ret;
}
- (AudioChunk *)readChunk:(size_t)maxFrames {
[accessLock lock];

View file

@ -32,6 +32,7 @@
- (double)amountPlayedInterval;
- (void)incrementAmountPlayed:(double)seconds;
- (void)setAmountPlayed:(double)seconds;
- (void)resetAmountPlayed;
- (void)resetAmountPlayedInterval;

View file

@ -59,6 +59,15 @@
}
}
- (void)setAmountPlayed:(double)seconds {
	// A small forward jump (under five seconds) is treated as normal
	// playback progress and routed through the incremental path so any
	// interval accounting stays in sync; anything else — a backward move
	// or a large jump — is treated as a seek and set directly.
	const double advance = seconds - amountPlayed;
	if(advance <= 0.0 || advance >= 5.0) {
		amountPlayed = seconds;
		return;
	}
	[self incrementAmountPlayed:advance];
}
// Resets the absolute played position back to zero (start of stream).
- (void)resetAmountPlayed {
amountPlayed = 0;
}

View file

@ -50,7 +50,7 @@ using std::atomic_long;
double secondsLatency;
double visPushed;
double tempo;
double streamTimestamp;
double lastClippedSampleRate;

View file

@ -86,6 +86,8 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;
config = [chunk channelConfig];
double chunkDuration = 0;
streamTimestamp = [chunk streamTimestamp] + [chunk durationRatioed];
if(frameCount) {
chunkDuration = [chunk duration];
@ -211,8 +213,6 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;
secondsHdcdSustained = 0;
tempo = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] doubleForKey:@"tempo"];
outputLock = [[NSLock alloc] init];
#ifdef OUTPUT_LOG
@ -257,11 +257,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
NSDictionary *device = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] objectForKey:@"outputDevice"];
[self setOutputDeviceWithDeviceDict:device];
} else if([keyPath isEqualToString:@"values.eqPreamp"]) {
float preamp = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] floatForKey:@"eqPreamp"];
eqPreamp = pow(10.0, preamp / 20.0);
} else if([keyPath isEqualToString:@"values.tempo"]) {
tempo = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] doubleForKey:@"tempo"];
}
}
@ -846,8 +841,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
visController = [VisualizationController sharedController];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.outputDevice" options:0 context:kOutputCoreAudioContext];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.eqPreamp" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kOutputCoreAudioContext];
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.tempo" options:(NSKeyValueObservingOptionInitial | NSKeyValueObservingOptionNew) context:kOutputCoreAudioContext];
observersapplied = YES;
@ -857,7 +850,7 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
- (void)updateLatency:(double)secondsPlayed {
if(secondsPlayed > 0) {
[outputController incrementAmountPlayed:secondsPlayed * tempo];
[outputController setAmountPlayed:streamTimestamp];
}
double visLatency = visPushed;
visPushed -= secondsPlayed;
@ -895,7 +888,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
stopInvoked = YES;
if(observersapplied) {
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.outputDevice" context:kOutputCoreAudioContext];
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.tempo" context:kOutputCoreAudioContext];
observersapplied = NO;
}
stopping = YES;

View file

@ -135,6 +135,9 @@ static CAdPlugDatabase *g_database = NULL;
total += samples_now;
}
double streamTimestamp = (double)(current_pos) / sampleRate;
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:total];
return chunk;

View file

@ -46,6 +46,7 @@
uint32_t channelConfig;
float frequency;
long totalFrames;
long frame;
NSString* codec;
}

View file

@ -150,6 +150,8 @@ static SInt64 getSizeProc(void *clientData) {
_in_opened = YES;
frame = 0;
return [self readInfoFromExtAudioFileRef];
}
@ -330,6 +332,10 @@ static SInt64 getSizeProc(void *clientData) {
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
double streamTimestamp = (double)(frame) / frequency;
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:frameCount];
return chunk;
@ -343,6 +349,8 @@ static SInt64 getSizeProc(void *clientData) {
return -1;
}
self->frame = frame;
return frame;
}

View file

@ -361,6 +361,9 @@ static void *kCueSheetDecoderContext = &kCueSheetDecoderContext;
[chunk setFrameCount:frames / frameScale];
}
double streamTimestamp = (double)(framePosition - trackStart) / [chunk format].mSampleRate;
[chunk setStreamTimestamp:streamTimestamp];
framePosition += chunk.frameCount * frameScale;
return chunk;

View file

@ -921,6 +921,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
if(totalFrames && (framesRead + framesReadNow > totalFrames))
framesReadNow = (int)(totalFrames - framesRead);
double streamTimestamp = (double)(framesRead) / frequency;
framesRead += framesReadNow;
metadataUpdateCount += framesReadNow;
@ -931,6 +933,7 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:framesRead];
[chunk assignSamples:sampleBuffer frameCount:framesReadNow];
return chunk;

View file

@ -30,6 +30,8 @@
uint32_t channelConfig;
float frequency;
long totalFrames;
long frame;
double seconds;
long fileSize;

View file

@ -372,6 +372,9 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
blockBuffer = malloc(SAMPLE_blockBuffer_SIZE);
frame = 0;
seconds = 0.0;
return YES;
}
@ -391,8 +394,14 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
if(blockBufferFrames > 0) {
chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
frame += blockBufferFrames;
[chunk setStreamTimestamp:seconds];
[chunk assignSamples:blockBuffer frameCount:blockBufferFrames];
seconds += [chunk duration];
blockBufferFrames = 0;
}
@ -453,6 +462,9 @@ void ErrorCallback(const FLAC__StreamDecoder *decoder, FLAC__StreamDecoderErrorS
if(!FLAC__stream_decoder_seek_absolute(decoder, sample))
return -1;
frame = sample;
seconds = (double)(sample) / frequency;
return sample;
}

View file

@ -192,6 +192,8 @@ gme_err_t readCallback(void *data, void *out, int count) {
else
gme_set_fade(emu, (int)(length - fade), (int)fade);
double streamTimestamp = (double)(gme_tell(emu)) * 0.001;
gme_play(emu, numSamples, (short int *)buf);
// Some formats support length, but we'll add that in the future.
@ -199,6 +201,8 @@ gme_err_t readCallback(void *data, void *out, int count) {
// GME will always generate samples. There's no real EOS.
// Addendum: The above gme_track_ended() call has been in place for years now
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:sampleBuffer frameCount:frames];
return chunk;

View file

@ -1362,10 +1362,13 @@ static int usf_info(void *context, const char *name, const char *value) {
}
}
double streamTimestamp = (double)(framesRead) / (double)(sampleRate);
framesRead += written;
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:written];
return chunk;

View file

@ -158,10 +158,13 @@ static void oneTimeInit(void) {
total = (int)(fadePos - fadeStart);
}
double streamTimestamp = (double)(framesRead) / sampleRate;
framesRead += total;
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:sampleBuffer frameCount:total];
return chunk;

View file

@ -25,6 +25,8 @@
long _currentOutputFrames;
long _fileSize;
double seconds;
id<CogSource> _source;
BOOL _firstFrame;

View file

@ -476,6 +476,8 @@ error:
_endPadding = 0;
// DLog(@"OPEN: %i", _firstFrame);
seconds = 0.0;
inputEOF = NO;
genre = @"";
@ -748,7 +750,9 @@ error:
if(framesToCopy) {
chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:seconds];
[chunk assignSamples:_outputBuffer frameCount:framesToCopy];
seconds += [chunk duration];
_outputFrames = 0;
break;
}
@ -812,6 +816,7 @@ error:
if(frame < _framesDecoded) {
_framesDecoded = 0;
seconds = 0.0;
_firstFrame = YES;
if(_foundLAMEHeader || _foundiTunSMPB)
framesToSkip = _startPadding;
@ -821,6 +826,7 @@ error:
}
framesToSkip += frame - _framesDecoded;
seconds += (double)(frame - _framesDecoded) / sampleRate;
return frame;
}

View file

@ -299,6 +299,8 @@ static OSType getOSType(const char *in_) {
if(![self initDecoder])
return nil;
}
double streamTimestamp = 0.0;
try {
player->setLoopMode((repeatone || isLooped) ? (MIDIPlayer::loop_mode_enable | MIDIPlayer::loop_mode_force) : 0);
@ -317,6 +319,8 @@ static OSType getOSType(const char *in_) {
soundFontsAssigned = YES;
}
streamTimestamp = (double)(player->Tell()) / sampleRate;
int frames = 1024;
float buffer[frames * 2];
@ -358,6 +362,7 @@ static OSType getOSType(const char *in_) {
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:frames];
return chunk;

View file

@ -589,3 +589,7 @@ void MIDIPlayer::send_sysex_time_filtered(const uint8_t *data, size_t size, size
bool MIDIPlayer::GetLastError(std::string &p_out) {
return get_last_error(p_out);
}
// Returns the player's current absolute play position as tracked by the
// internal time counter. NOTE(review): callers in this change divide the
// result by the sample rate, which suggests the unit is sample frames —
// confirm against uTimeCurrent's definition.
unsigned long MIDIPlayer::Tell() const {
return uTimeCurrent;
}

View file

@ -35,6 +35,7 @@ class MIDIPlayer {
bool Load(const midi_container& midi_file, unsigned subsong, unsigned loop_mode, unsigned clean_flags);
unsigned long Play(float* out, unsigned long count);
void Seek(unsigned long sample);
unsigned long Tell() const;
bool GetLastError(std::string& p_out);

View file

@ -27,6 +27,7 @@
int bitrate;
float frequency;
long totalFrames;
long frame;
}
- (BOOL)writeToBuffer:(float *)sample_buffer fromBuffer:(const MPC_SAMPLE_FORMAT *)p_buffer frames:(unsigned)frames;

View file

@ -79,6 +79,8 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) {
totalFrames = mpc_streaminfo_get_length_samples(&info);
frame = 0;
[self willChangeValueForKey:@"properties"];
[self didChangeValueForKey:@"properties"];
@ -151,8 +153,12 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) {
}
}
double streamTimestamp = (double)(frame) / frequency;
frame += framesRead;
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:floatBuffer frameCount:framesRead];
return chunk;
@ -172,6 +178,8 @@ mpc_bool_t CanSeekProc(mpc_reader *p_reader) {
- (long)seek:(long)sample {
mpc_demux_seek_sample(demux, sample);
frame = sample;
return sample;
}

View file

@ -120,6 +120,8 @@ static void g_push_archive_extensions(std::vector<std::string> &list) {
try {
mod->set_repeat_count(IsRepeatOneSet() ? -1 : 0);
double streamTimestamp = mod->get_position_seconds();
int frames = 1024;
float buffer[frames * 2];
void *buf = (void *)buffer;
@ -142,6 +144,7 @@ static void g_push_archive_extensions(std::vector<std::string> &list) {
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:total];
return chunk;

View file

@ -22,6 +22,7 @@
int bitrate;
int channels;
long totalFrames;
long frame;
int metadataUpdateInterval;
int metadataUpdateCount;

View file

@ -107,7 +107,8 @@ opus_int64 sourceTell(void *_stream) {
seekable = op_seekable(opusRef);
totalFrames = op_pcm_total(opusRef, -1);
frame = 0;
const OpusHead *head = op_head(opusRef, -1);
const OpusTags *tags = op_tags(opusRef, -1);
@ -289,8 +290,12 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
[self updateIcyMetadata];
}
double streamTimestamp = (double)(frame) / 48000.0;
frame += total / channels;
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:total / channels];
return chunk;
@ -308,6 +313,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
- (long)seek:(long)frame {
op_pcm_seek(opusRef, frame);
self->frame = frame;
return frame;
}

View file

@ -397,8 +397,8 @@ namespace Organya {
}
- (AudioChunk *)readAudio {
int total = 0;
double streamTimestamp = (double)(m_song->cur_beat) * (double)(m_song->ms_per_beat) * 0.001;
std::vector<float> samples = m_song->Synth(sampleRate);
int rendered = (int)(samples.size() / 2);
@ -423,7 +423,9 @@ namespace Organya {
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:streamTimestamp];
if(samplesDiscard) {
[chunk assignSamples:&samples[samplesDiscard * 2] frameCount:rendered - samplesDiscard];
samplesDiscard = 0;

View file

@ -22,6 +22,8 @@
float frequency;
long totalFrames;
BOOL seekable;
double seconds;
}
@end

View file

@ -33,6 +33,8 @@
totalFrames = (decoder->shn_get_song_length() * frequency) / 1000.0;
seconds = 0.0;
decoder->go();
[self willChangeValueForKey:@"properties"];
@ -57,8 +59,12 @@
amountRead = decoder->read(buf, frames * bytesPerFrame);
} while(amountRead == -1);
[chunk setStreamTimestamp:seconds];
[chunk assignSamples:buf frameCount:amountRead / bytesPerFrame];
seconds += [chunk duration];
return chunk;
}
@ -66,6 +72,7 @@
unsigned int sec = sample / frequency;
decoder->seek(sec);
seconds = sec;
return sample;
}

View file

@ -15,7 +15,9 @@
long length;
long remain;
double seconds;
float *buffer;
}

View file

@ -27,6 +27,7 @@ enum { channels = 2 };
length = seconds * sample_rate;
remain = length;
seconds = 0.0;
buffer = (float *) calloc(sizeof(float), 1024 * channels);
if(!buffer) {
@ -68,8 +69,11 @@ enum { channels = 2 };
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
[chunk setStreamTimestamp:seconds];
[chunk assignSamples:buffer frameCount:frames];
seconds += [chunk duration];
return chunk;
}
@ -79,6 +83,8 @@ enum { channels = 2 };
remain = length - frame;
seconds = (double)(frame) / sample_rate;
return frame;
}

View file

@ -31,6 +31,7 @@
int channels;
float frequency;
long totalFrames;
long frame;
int metadataUpdateInterval;
int metadataUpdateCount;

View file

@ -99,6 +99,7 @@ long sourceTell(void *datasource) {
seekable = ov_seekable(&vorbisRef);
totalFrames = ov_pcm_total(&vorbisRef, -1);
frame = 0;
[self willChangeValueForKey:@"properties"];
[self didChangeValueForKey:@"properties"];
@ -221,6 +222,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
int total = 0;
int frames = 1024;
double streamTimestamp = (double)(frame) / frequency;
if(currentSection != lastSection) {
vorbis_info *vi;
vi = ov_info(&vorbisRef, -1);
@ -277,6 +280,7 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
[self updateIcyMetadata];
}
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:total];
return chunk;
@ -293,6 +297,8 @@ static void setDictionary(NSMutableDictionary *dict, NSString *tag, NSString *va
- (long)seek:(long)frame {
ov_pcm_seek(&vorbisRef, frame);
self->frame = frame;
return frame;
}

View file

@ -44,6 +44,7 @@
int bitrate;
float frequency;
long totalFrames;
long frame;
}
@end

View file

@ -150,6 +150,7 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) {
frequency = WavpackGetSampleRate(wpc);
totalFrames = WavpackGetNumSamples(wpc);
frame = 0;
isDSD = NO;
@ -257,6 +258,10 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) {
ALog(@"Unsupported sample size: %d", bitsPerSample);
}
double streamTimestamp = (double)(frame) / frequency;
frame += samplesRead;
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:samplesRead];
return chunk;
@ -270,6 +275,8 @@ int32_t WriteBytesProc(void *ds, void *data, int32_t bcount) {
}
WavpackSeekSample(wpc, trueFrame);
self->frame = frame;
return frame;
}

View file

@ -231,6 +231,8 @@ const int masterVol = 0x10000; // Fixed point 16.16
mainPlr->SetLoopCount(vgmplay->GetModifiedLoopCount(maxLoops));
}
double streamTimestamp = mainPlr->GetCurTime(0);
UInt32 framesDone = 0;
while(framesDone < frames) {
@ -247,6 +249,7 @@ const int masterVol = 0x10000; // Fixed point 16.16
framesDone += framesToDo;
}
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:framesDone];
return chunk;

View file

@ -307,6 +307,8 @@ static void sidTuneLoader(const char *fileName, std::vector<uint8_t> &bufferRef)
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
double streamTimestamp = (double)(renderedTotal) / sampleRate;
int16_t buffer[1024 * n_channels];
int framesToRender = 1024;
@ -353,6 +355,8 @@ static void sidTuneLoader(const char *fileName, std::vector<uint8_t> &bufferRef)
fadeRemain = fadeEnd;
}
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:buffer frameCount:rendered];
return chunk;

View file

@ -318,6 +318,8 @@ static NSString *get_description_tag(const char *description, const char *tag, c
UInt32 framesMax = frames;
UInt32 framesDone = 0;
double streamTimestamp = (double)(stream->pstate.play_position) / sampleRate;
id audioChunkClass = NSClassFromString(@"AudioChunk");
AudioChunk *chunk = [[audioChunkClass alloc] initWithProperties:[self properties]];
@ -374,6 +376,7 @@ static NSString *get_description_tag(const char *description, const char *tag, c
frames -= frames_done;
}
[chunk setStreamTimestamp:streamTimestamp];
[chunk assignSamples:sample_buf frameCount:framesDone];
return chunk;