Cog Audio: Implemented device output and input file format changing support
parent 9feaffc92d
commit 5fef62dd03
4 changed files with 91 additions and 57 deletions
@@ -99,10 +99,10 @@
 {
     DLog(@"SOMETHING CHANGED!");
     if ([keyPath isEqual:@"properties"]) {
-        //Setup converter!
-        //Inform something of properties change
-        //Disable support until it is properly implemented.
-        //[controller inputFormatDidChange: propertiesToASBD([decoder properties])];
+        DLog(@"Input format changed");
+        // Converter doesn't need resetting for this, as output format hasn't changed
+        ConverterNode *converter = [[[controller controller] bufferChain] converter];
+        [converter inputFormatDidChange:[[[controller controller] bufferChain] inputFormat]];
     }
     else if ([keyPath isEqual:@"metadata"]) {
         //Inform something of metadata change
@@ -125,8 +125,8 @@
     DLog(@"SEEKING! Resetting Buffer");
 
     amountInBuffer = 0;
+    // This resets the converter's buffer
     [self resetBuffer];
-    [converter resetBuffer];
     [converter inputFormatDidChange:[[[controller controller] bufferChain] inputFormat]];
 
     DLog(@"Reset buffer!");
@@ -89,6 +89,20 @@
 - (void)setFormat:(AudioStreamBasicDescription *)f
 {
     format = *f;
+    BufferChain *bufferChain = [controller bufferChain];
+    if (bufferChain)
+    {
+        InputNode *input = [bufferChain inputNode];
+        ConverterNode *converter = [bufferChain converter];
+        if (input && converter)
+        {
+            // Need to clear the buffer, as it contains converted output
+            // targeting the previous output format
+            [input resetBuffer];
+            [converter setOutputFormat:format];
+            [converter inputFormatDidChange:[bufferChain inputFormat]];
+        }
+    }
 }
 
 - (void)close
@@ -27,6 +27,8 @@
     BOOL listenerapplied;
 
     float volume;
 
+    AVAudioFormat *_deviceFormat;
+
     AudioDeviceID outputDeviceID;
     AudioStreamBasicDescription deviceFormat; // info about the default device
@@ -56,8 +56,13 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 - (void)threadEntry:(id)arg
 {
     running = YES;
+    size_t eventCount = 0;
     while (!stopping) {
         dispatch_semaphore_wait(_sema, DISPATCH_TIME_FOREVER);
+        if (++eventCount == 128) {
+            [self updateDeviceFormat];
+            eventCount = 0;
+        }
     }
     stopped = YES;
     [self stop];
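(Not part of the diff.) The hunk above makes the output thread re-check the device format only once every 128 render-semaphore signals instead of on every wakeup. A minimal standalone sketch of that polling pattern; the names runFormatWatcher and checkFormat are illustrative, not Cog's, and in Cog the semaphore is signalled from the render path and the check is -updateDeviceFormat:

#import <Foundation/Foundation.h>

// Sketch only: the semaphore signal source and the format check are stand-ins.
static void runFormatWatcher(dispatch_semaphore_t sema,
                             volatile BOOL *stopping,
                             void (^checkFormat)(void))
{
    size_t eventCount = 0;
    while (!*stopping) {
        dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER);
        // Poll the device format only every 128 wakeups so the check stays
        // cheap relative to the render callback rate.
        if (++eventCount == 128) {
            checkFormat();
            eventCount = 0;
        }
    }
}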
@@ -229,6 +234,69 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
     free(devids);
 }
 
+- (BOOL)updateDeviceFormat
+{
+    AVAudioFormat *format = _au.outputBusses[0].format;
+
+    if (!_deviceFormat || ![_deviceFormat isEqual:format])
+    {
+        NSError *err;
+        AVAudioFormat *renderFormat;
+
+        _deviceFormat = format;
+        deviceFormat = *(format.streamDescription);
+
+        ///Seems some 3rd party devices return incorrect stuff...or I just don't like noninterleaved data.
+        deviceFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsNonInterleaved;
+//      deviceFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsFloat;
+//      deviceFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
+        // We don't want more than 8 channels
+        if (deviceFormat.mChannelsPerFrame > 8) {
+            deviceFormat.mChannelsPerFrame = 8;
+        }
+        deviceFormat.mBytesPerFrame = deviceFormat.mChannelsPerFrame*(deviceFormat.mBitsPerChannel/8);
+        deviceFormat.mBytesPerPacket = deviceFormat.mBytesPerFrame * deviceFormat.mFramesPerPacket;
+
+        /* Set the channel layout for the audio queue */
+        AudioChannelLayoutTag tag = 0;
+        switch (deviceFormat.mChannelsPerFrame) {
+            case 1:
+                tag = kAudioChannelLayoutTag_Mono;
+                break;
+            case 2:
+                tag = kAudioChannelLayoutTag_Stereo;
+                break;
+            case 3:
+                tag = kAudioChannelLayoutTag_DVD_4;
+                break;
+            case 4:
+                tag = kAudioChannelLayoutTag_Quadraphonic;
+                break;
+            case 5:
+                tag = kAudioChannelLayoutTag_MPEG_5_0_A;
+                break;
+            case 6:
+                tag = kAudioChannelLayoutTag_MPEG_5_1_A;
+                break;
+            case 7:
+                tag = kAudioChannelLayoutTag_MPEG_6_1_A;
+                break;
+            case 8:
+                tag = kAudioChannelLayoutTag_MPEG_7_1_A;
+                break;
+        }
+
+        renderFormat = [[AVAudioFormat alloc] initWithStreamDescription:&deviceFormat channelLayout:[[AVAudioChannelLayout alloc] initWithLayoutTag:tag]];
+        [_au.inputBusses[0] setFormat:renderFormat error:&err];
+        if (err != nil)
+            return NO;
+
+        [outputController setFormat:&deviceFormat];
+    }
+
+    return YES;
+}
+
 - (BOOL)setup
 {
     if (_au)
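(Not part of the diff.) The new -updateDeviceFormat boils down to: take the output bus format, force interleaved samples, cap the stream at 8 channels, recompute the packed frame/packet sizes, pick a channel-layout tag by channel count, and wrap the result in an AVAudioFormat for the input bus. A condensed standalone sketch of that ASBD handling; the helper name renderFormatForDevice is illustrative, not from the commit:

#import <AVFoundation/AVFoundation.h>

// Sketch under the same assumptions the commit makes: linear PCM,
// at most 8 channels, layout tag chosen purely by channel count.
static AVAudioFormat *renderFormatForDevice(AudioStreamBasicDescription asbd)
{
    asbd.mFormatFlags &= ~kLinearPCMFormatFlagIsNonInterleaved;
    if (asbd.mChannelsPerFrame > 8)
        asbd.mChannelsPerFrame = 8;
    asbd.mBytesPerFrame  = asbd.mChannelsPerFrame * (asbd.mBitsPerChannel / 8);
    asbd.mBytesPerPacket = asbd.mBytesPerFrame * asbd.mFramesPerPacket;

    static const AudioChannelLayoutTag tags[9] = {
        0,
        kAudioChannelLayoutTag_Mono,
        kAudioChannelLayoutTag_Stereo,
        kAudioChannelLayoutTag_DVD_4,
        kAudioChannelLayoutTag_Quadraphonic,
        kAudioChannelLayoutTag_MPEG_5_0_A,
        kAudioChannelLayoutTag_MPEG_5_1_A,
        kAudioChannelLayoutTag_MPEG_6_1_A,
        kAudioChannelLayoutTag_MPEG_7_1_A,
    };
    AudioChannelLayoutTag tag = tags[asbd.mChannelsPerFrame];
    if (tag == 0)
        return nil; // zero channels: nothing sensible to build

    AVAudioChannelLayout *layout = [[AVAudioChannelLayout alloc] initWithLayoutTag:tag];
    return [[AVAudioFormat alloc] initWithStreamDescription:&asbd channelLayout:layout];
}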
@@ -239,7 +307,6 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
     stopped = NO;
     outputDeviceID = -1;
 
-    AVAudioFormat *format, *renderFormat;
     AudioComponentDescription desc;
     NSError *err;
 
@@ -268,57 +335,10 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
         [self setOutputDeviceWithDeviceDict:nil];
     }
 
-    format = _au.outputBusses[0].format;
+    _deviceFormat = nil;
 
-    deviceFormat = *(format.streamDescription);
+    [self updateDeviceFormat];
 
-    ///Seems some 3rd party devices return incorrect stuff...or I just don't like noninterleaved data.
-    deviceFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsNonInterleaved;
-//  deviceFormat.mFormatFlags &= ~kLinearPCMFormatFlagIsFloat;
-//  deviceFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger;
-    if (deviceFormat.mChannelsPerFrame > 8) {
-        deviceFormat.mChannelsPerFrame = 8;
-    }
-    // And force a default rate for crappy devices
-    if (deviceFormat.mSampleRate < 32000)
-        deviceFormat.mSampleRate = 48000;
-    deviceFormat.mBytesPerFrame = deviceFormat.mChannelsPerFrame*(deviceFormat.mBitsPerChannel/8);
-    deviceFormat.mBytesPerPacket = deviceFormat.mBytesPerFrame * deviceFormat.mFramesPerPacket;
-
-    /* Set the channel layout for the audio queue */
-    AudioChannelLayoutTag tag = 0;
-    switch (deviceFormat.mChannelsPerFrame) {
-        case 1:
-            tag = kAudioChannelLayoutTag_Mono;
-            break;
-        case 2:
-            tag = kAudioChannelLayoutTag_Stereo;
-            break;
-        case 3:
-            tag = kAudioChannelLayoutTag_DVD_4;
-            break;
-        case 4:
-            tag = kAudioChannelLayoutTag_Quadraphonic;
-            break;
-        case 5:
-            tag = kAudioChannelLayoutTag_MPEG_5_0_A;
-            break;
-        case 6:
-            tag = kAudioChannelLayoutTag_MPEG_5_1_A;
-            break;
-        case 7:
-            tag = kAudioChannelLayoutTag_MPEG_6_1_A;
-            break;
-        case 8:
-            tag = kAudioChannelLayoutTag_MPEG_7_1_A;
-            break;
-    }
-
-    renderFormat = [[AVAudioFormat alloc] initWithStreamDescription:&deviceFormat channelLayout:[[AVAudioChannelLayout alloc] initWithLayoutTag:tag]];
-    [_au.inputBusses[0] setFormat:renderFormat error:&err];
-    if (err != nil)
-        return NO;
-
     __block dispatch_semaphore_t sema = _sema;
     __block OutputNode * outputController = self->outputController;
     __block float * volume = &self->volume;
@@ -368,8 +388,6 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 
     [NSThread detachNewThreadSelector:@selector(threadEntry:) toTarget:self withObject:nil];
 
-    [outputController setFormat:&deviceFormat];
-
     return (err == nil);
 }
 