Fix further bugs with output switchover
Change some things I missed.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
parent d364a7ef10
commit adc4128c28
3 changed files with 40 additions and 20 deletions
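For context on the render-callback hunk below: the downmixer's input format and channel config are now derived from what the FreeSurround and HRTF stages will actually produce, rather than from the raw input stream. A minimal sketch of that derivation as a hypothetical standalone helper (the function name and parameters are illustrative only; the real code inlines this in the callback and reads the fsurround object and hrtf state under outputLock):

	#import <Foundation/Foundation.h>
	#import <CoreAudio/CoreAudioTypes.h>

	// Illustrative helper, not the actual project API: derive the format the
	// downmixer will receive after the optional FreeSurround / HRTF stages.
	static AudioStreamBasicDescription
	DownmixInputFormat(AudioStreamBasicDescription streamFormat,
	                   UInt32 surroundChannels, // 0 if FreeSurround is inactive
	                   BOOL hrtfEnabled) {
		AudioStreamBasicDescription dmFormat = streamFormat;
		UInt32 dmChannels = streamFormat.mChannelsPerFrame;

		if(surroundChannels) dmChannels = surroundChannels; // FreeSurround output width
		if(hrtfEnabled) dmChannels = 2;                     // HRTF always emits stereo

		if(dmChannels != streamFormat.mChannelsPerFrame) {
			// Recompute packed frame/packet sizes for the new channel count,
			// mirroring the arithmetic used in the diff below.
			dmFormat.mChannelsPerFrame = dmChannels;
			dmFormat.mBytesPerFrame = ((dmFormat.mBitsPerChannel + 7) / 8) * dmChannels;
			dmFormat.mBytesPerPacket = dmFormat.mBytesPerFrame * dmFormat.mFramesPerPacket;
		}
		return dmFormat;
	}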
@@ -88,16 +88,12 @@ using std::atomic_long;
 	AudioStreamBasicDescription deviceFormat;
 	AudioStreamBasicDescription realStreamFormat; // stream format pre-hrtf
 	AudioStreamBasicDescription streamFormat; // stream format last seen in render callback
-	AudioStreamBasicDescription realNewFormat; // in case of resampler flush
-	AudioStreamBasicDescription newFormat; // in case of resampler flush
 
 	AudioStreamBasicDescription visFormat; // Mono format for vis
 
 	uint32_t deviceChannelConfig;
 	uint32_t realStreamChannelConfig;
 	uint32_t streamChannelConfig;
-	uint32_t realNewChannelConfig;
-	uint32_t newChannelConfig;
 
 	AUAudioUnit *_au;
 
@@ -74,23 +74,43 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 	if([outputController peekFormat:&format channelConfig:&config]) {
 		AudioStreamBasicDescription origFormat;
 		uint32_t origConfig = config;
-		memcpy(&origFormat, &format, sizeof(origFormat));
+		origFormat = format;
 
 		UInt32 srcChannels = format.mChannelsPerFrame;
+		uint32_t dmConfig = config;
+		uint32_t dmChannels = srcChannels;
+		AudioStreamBasicDescription dmFormat;
+		dmFormat = format;
+		[outputLock lock];
+		if(fsurround) {
+			dmChannels = [fsurround channelCount];
+			dmConfig = [fsurround channelConfig];
+		}
+		if(hrtf) {
+			dmChannels = 2;
+			dmConfig = AudioChannelFrontLeft | AudioChannelFrontRight;
+		}
+		[outputLock unlock];
+		if(dmChannels != srcChannels) {
+			dmFormat.mChannelsPerFrame = dmChannels;
+			dmFormat.mBytesPerFrame = ((dmFormat.mBitsPerChannel + 7) / 8) * dmChannels;
+			dmFormat.mBytesPerPacket = dmFormat.mBytesPerFrame * dmFormat.mFramesPerPacket;
+		}
 		UInt32 dstChannels = deviceFormat.mChannelsPerFrame;
 		if(srcChannels != dstChannels) {
 			format.mChannelsPerFrame = dstChannels;
 			format.mBytesPerFrame = ((format.mBitsPerChannel + 7) / 8) * dstChannels;
 			format.mBytesPerPacket = format.mBytesPerFrame * format.mFramesPerPacket;
-			config = deviceChannelConfig;
-			downmixer = [[DownmixProcessor alloc] initWithInputFormat:origFormat inputConfig:origConfig andOutputFormat:format outputConfig:config];
+			downmixer = [[DownmixProcessor alloc] initWithInputFormat:dmFormat inputConfig:dmConfig andOutputFormat:format outputConfig:deviceChannelConfig];
+			format = origFormat;
 		} else {
 			downmixer = nil;
 		}
-		if(!streamFormatStarted || config != realStreamChannelConfig || memcmp(&newFormat, &format, sizeof(format)) != 0) {
-			newFormat = format;
-			newChannelConfig = config;
+		if(!streamFormatStarted || config != realStreamChannelConfig || memcmp(&realStreamFormat, &format, sizeof(format)) != 0) {
+			realStreamFormat = format;
+			realStreamChannelConfig = config;
 			streamFormatStarted = YES;
+			streamFormatChanged = YES;
 
 			visFormat = format;
 			visFormat.mChannelsPerFrame = 1;
@@ -136,8 +156,8 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 			[visController postSampleRate:44100.0];
 
 			[outputLock lock];
-			if(fabs(newFormat.mSampleRate - 44100.0) > 1e-5) {
-				if(fabs(newFormat.mSampleRate - lastVisRate) > 1e-5) {
+			if(fabs(realStreamFormat.mSampleRate - 44100.0) > 1e-5) {
+				if(fabs(realStreamFormat.mSampleRate - lastVisRate) > 1e-5) {
 					if(rsvis) {
 						for(;;) {
 							int samplesFlushed;
@@ -152,7 +172,7 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 						rsstate_delete(rsvis);
 						rsvis = NULL;
 					}
-					lastVisRate = newFormat.mSampleRate;
+					lastVisRate = realStreamFormat.mSampleRate;
 					rsvis = rsstate_new(1, lastVisRate, 44100.0);
 				}
 				if(rsvis) {
@@ -201,7 +221,7 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 			}
 			[outputLock unlock];
 
-			cblas_scopy((int)(frameCount * newFormat.mChannelsPerFrame), outputPtr, 1, &buffer[0], 1);
+			cblas_scopy((int)(frameCount * realStreamFormat.mChannelsPerFrame), outputPtr, 1, &buffer[0], 1);
 			amountRead = frameCount;
 		} else {
 			return 0;
@@ -225,7 +245,7 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 			volumeScale *= eqPreamp;
 		}
 
-		scale_by_volume(&buffer[0], amountRead * newFormat.mChannelsPerFrame, volumeScale * volume);
+		scale_by_volume(&buffer[0], amountRead * realStreamFormat.mChannelsPerFrame, volumeScale * volume);
 
 		return amountRead;
 	}
@@ -797,16 +817,20 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 - (int)renderAndConvert {
 	OSStatus status;
 	int inputRendered = inputBufferLastTime;
-	int bytesRendered = inputRendered * newFormat.mBytesPerPacket;
+	int bytesRendered = inputRendered * realStreamFormat.mBytesPerPacket;
 
+	if(resetStreamFormat) {
+		[self updateStreamFormat];
+	}
+
 	while(inputRendered < 4096) {
 		int maxToRender = MIN(4096 - inputRendered, 512);
 		int rendered = [self renderInput:maxToRender toBuffer:&tempBuffer[0]];
 		if(rendered > 0) {
-			memcpy(((uint8_t*)&inputBuffer[0]) + bytesRendered, &tempBuffer[0], rendered * newFormat.mBytesPerPacket);
+			memcpy(((uint8_t*)&inputBuffer[0]) + bytesRendered, &tempBuffer[0], rendered * realStreamFormat.mBytesPerPacket);
 		}
 		inputRendered += rendered;
-		bytesRendered += rendered * newFormat.mBytesPerPacket;
+		bytesRendered += rendered * realStreamFormat.mBytesPerPacket;
 		if(streamFormatChanged) {
 			streamFormatChanged = NO;
 			if(inputRendered) {
@@ -920,12 +944,10 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 	_au.outputProvider = ^AUAudioUnitStatus(AudioUnitRenderActionFlags *_Nonnull actionFlags, const AudioTimeStamp *_Nonnull timestamp, AUAudioFrameCount frameCount, NSInteger inputBusNumber, AudioBufferList *_Nonnull inputData) {
 		if(!frameCount) return 0;
 
-		int i;
 		const int channels = format->mChannelsPerFrame;
 		if(!channels) return 0;
 
 		OutputCoreAudio *_self = (__bridge OutputCoreAudio *)refCon;
-		float *samplePtr = nil;
 		int renderedSamples = 0;
 
 		while(renderedSamples < frameCount) {
@@ -973,6 +995,7 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
 
 	resetStreamFormat = NO;
 	streamFormatChanged = NO;
+	streamFormatStarted = NO;
 
 	inputBufferLastTime = 0;
 
Audio/ThirdParty/rsstate.hpp (vendored): 1 addition

@@ -74,6 +74,7 @@ struct rsstate {
 				return 0;
 			}
 		}
+		return (int)outTotal;
 	}
 };
 