Audio: Attempt to reduce glitching from seeking
Also applies to how output format changes are handled.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
parent 72a4a1c245
commit cdddcaecd8

8 changed files with 76 additions and 21 deletions
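In outline, the change brackets every seek (and every output format change) with a mute/restore pair, so stale buffered audio is never heard at the old position: the listener's volume is saved into the input node, the output is muted, the seek or reconfiguration happens, and the saved volume is restored once fresh samples are flowing. Below is a minimal, self-contained sketch of that ordering. The SketchOutput/SketchInput classes and the method bodies are illustrative stand-ins, not the real OutputNode/InputNode from this commit.

#import <Foundation/Foundation.h>

// Hypothetical stand-ins for the real output/input nodes; only the
// mute -> seek -> restore ordering is the point of this sketch.
@interface SketchOutput : NSObject
@property(nonatomic) double volume;
@end
@implementation SketchOutput
@end

@interface SketchInput : NSObject
@property(nonatomic) double lastVolume; // volume to restore after the seek
- (void)seek:(long)frame;
@end
@implementation SketchInput
- (void)seek:(long)frame {
	// A real input node would flush its buffers and reposition the decoder here.
	NSLog(@"seeking to frame %ld", frame);
}
@end

int main(void) {
	@autoreleasepool {
		SketchOutput *output = [SketchOutput new];
		SketchInput *input = [SketchInput new];
		output.volume = 0.75;
		double sampleRate = 44100.0;

		// Remember the listener's volume, mute so the flushed audio is
		// inaudible, seek, then restore once playback resumes.
		input.lastVolume = output.volume;
		output.volume = 0.0;
		[input seek:(long)(12.5 * sampleRate)];
		output.volume = input.lastVolume; // the real code restores this from the decoding thread
	}
	return 0;
}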
@@ -107,4 +107,6 @@

- (double)getPostVisLatency;

- (void)setVolume:(double)v;

@end
@@ -214,6 +214,10 @@
- (void)seek:(double)time {
	long frame = (long)round(time * [[[inputNode properties] objectForKey:@"sampleRate"] floatValue]);

	AudioPlayer * audioPlayer = controller;
	OutputNode *outputNode = [audioPlayer output];

	[inputNode setLastVolume:[outputNode volume]];
	[inputNode seek:frame];
}
@@ -346,4 +350,10 @@
	return latency;
}

- (void)setVolume:(double)v {
	AudioPlayer * audioPlayer = controller;
	OutputNode *outputNode = [audioPlayer output];
	[outputNode setVolume:v];
}

@end
@@ -29,6 +29,8 @@
	BOOL shouldSeek;
	long seekFrame;

	double lastVolume;

	BOOL observersAdded;

	Semaphore *exitAtTheEndOfTheStream;
@@ -49,4 +51,6 @@

- (id<CogDecoder>)decoder;

- (void)setLastVolume:(double)v;

@end
@@ -29,6 +29,7 @@ static void *kInputNodeContext = &kInputNodeContext;
	if(self) {
		exitAtTheEndOfTheStream = [[Semaphore alloc] init];
		threadExited = NO;
		lastVolume = 1.0;
	}

	return self;
@@ -159,7 +160,9 @@ static void *kInputNodeContext = &kInputNodeContext;

	while([self shouldContinue] == YES && [self endOfStream] == NO) {
		if(shouldSeek == YES) {
			BufferChain *bufferChain = [[controller controller] bufferChain];
			BufferChain *bufferChain = controller;
			[bufferChain setVolume:0.0];

			ConverterNode *converter = [bufferChain converter];
			VisualizationNode *visualization = [bufferChain visualization];
			DSPRubberbandNode *rubberband = [bufferChain rubberband];
@@ -194,6 +197,8 @@ static void *kInputNodeContext = &kInputNodeContext;
			if(seekError) {
				[controller setError:YES];
			}

			[bufferChain setVolume:lastVolume];
		}

		AudioChunk *chunk;
@@ -295,4 +300,8 @@ static void *kInputNodeContext = &kInputNodeContext;
	return [buffer listDuration];
}

- (void)setLastVolume:(double)v {
	lastVolume = v;
}

@end
@@ -59,6 +59,7 @@
- (AudioStreamBasicDescription)deviceFormat;
- (uint32_t)deviceChannelConfig;

- (double)volume;
- (void)setVolume:(double)v;

- (void)setShouldContinue:(BOOL)s;
@@ -152,26 +152,42 @@
	BufferChain *bufferChain = [controller bufferChain];
	if(bufferChain) {
		ConverterNode *converter = [bufferChain converter];
		if(converter) {
			// This clears the resampler buffer, but not the input buffer
			// We also have to jump the play position ahead accounting for
			// the data we are flushing
			amountPlayed += [[converter buffer] listDuration];

			AudioStreamBasicDescription inf = [bufferChain inputFormat];
			uint32_t config = [bufferChain inputConfig];

			format.mChannelsPerFrame = inf.mChannelsPerFrame;
			format.mBytesPerFrame = ((inf.mBitsPerChannel + 7) / 8) * format.mChannelsPerFrame;
			format.mBytesPerPacket = format.mBytesPerFrame * format.mFramesPerPacket;
			channelConfig = config;

			[converter setOutputFormat:format];
			[converter inputFormatDidChange:[bufferChain inputFormat] inputConfig:[bufferChain inputConfig]];
		}
		DSPDownmixNode *downmix = [bufferChain downmix];
		if(downmix && output) {
			[downmix setOutputFormat:[output deviceFormat] withChannelConfig:[output deviceChannelConfig]];
		AudioStreamBasicDescription outputFormat;
		uint32_t outputChannelConfig;
		BOOL formatChanged = NO;
		if(converter) {
			AudioStreamBasicDescription converterFormat = [converter nodeFormat];
			if(memcmp(&converterFormat, &format, sizeof(converterFormat)) != 0) {
				formatChanged = YES;
			}
		}
		if(downmix && output && !formatChanged) {
			outputFormat = [output deviceFormat];
			outputChannelConfig = [output deviceChannelConfig];
			AudioStreamBasicDescription currentOutputFormat = [downmix nodeFormat];
			uint32_t currentOutputChannelConfig = [downmix nodeChannelConfig];
			if(memcmp(&currentOutputFormat, &outputFormat, sizeof(currentOutputFormat)) != 0 ||
			   currentOutputChannelConfig != outputChannelConfig) {
				formatChanged = YES;
			}
		}
		if(formatChanged) {
			InputNode *inputNode = [bufferChain inputNode];
			if(inputNode) {
				[inputNode setLastVolume:[output volume]];
				[output setVolume:0.0];
			}
			if(converter) {
				[converter setOutputFormat:format];
			}
			if(downmix && output) {
				[downmix setOutputFormat:[output deviceFormat] withChannelConfig:[output deviceChannelConfig]];
			}
			if(inputNode) {
				AudioStreamBasicDescription inputFormat = [inputNode nodeFormat];
				[inputNode seek:(long)(amountPlayed * inputFormat.mSampleRate)];
			}
		}
	}
}
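The hunk above decides whether anything actually changed by comparing AudioStreamBasicDescription values byte for byte, plus the channel-config bitmap, and only then mutes, reconfigures, and re-seeks. A small sketch of that comparison follows; the helper name is an assumption for illustration, not part of this commit. The ASBD's fields are all plain scalars with no internal padding, so a byte-wise comparison is equivalent to comparing every field.

#include <CoreAudio/CoreAudioTypes.h>
#include <stdbool.h>
#include <string.h>

// Hypothetical helper mirroring the memcmp-based checks above: two formats
// count as "changed" when any ASBD field differs or the channel layout differs.
static bool formatsDiffer(const AudioStreamBasicDescription *a, uint32_t configA,
                          const AudioStreamBasicDescription *b, uint32_t configB) {
	return memcmp(a, b, sizeof(*a)) != 0 || configA != configB;
}

In the hunk above, that check feeds the formatChanged flag which gates the mute / reconfigure / seek sequence.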
@@ -181,6 +197,10 @@
	output = nil;
}

- (double)volume {
	return [output volume];
}

- (void)setVolume:(double)v {
	[output setVolume:v];
}
@@ -122,6 +122,7 @@ using std::atomic_long;

- (double)latency;

- (double)volume;
- (void)setVolume:(double)v;

- (void)setShouldPlayOutBuffer:(BOOL)enabled;
@@ -621,8 +621,12 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
	int inputRemain = _self->inputRemain;
	while(!inputRemain) {
		inputRemain = [_self renderAndConvert];
		if(_self->stopping)
		if(_self->stopping) {
			inputData->mBuffers[0].mDataByteSize = frameCount * format->mBytesPerPacket;
			inputData->mBuffers[0].mNumberChannels = channels;
			bzero(inputData->mBuffers[0].mData, inputData->mBuffers[0].mDataByteSize);
			return 0;
		}
	}
	if(inputRemain) {
		int inputTodo = MIN(inputRemain, frameCount - renderedSamples);
@@ -728,6 +732,10 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
	[visController postLatency:[outputController getPostVisLatency]];
}

- (double)volume {
	return volume * 100.0f;
}

- (void)setVolume:(double)v {
	volume = v * 0.01f;
}