Revert "Core Audio output: Rewrote major portions"
This reverts commit 637ea4efe1
.
This commit is contained in:
parent
4cdca2f5f8
commit
0131f7c925
17 changed files with 317 additions and 655 deletions
|
@@ -93,7 +93,7 @@
 bufferChain = [[BufferChain alloc] initWithController:self];
 [self notifyStreamChanged:userInfo];
 
-while (![bufferChain open:url withOutputFormatHint:[output format] withRGInfo:rgi])
+while (![bufferChain open:url withOutputFormat:[output format] withRGInfo:rgi])
 {
 bufferChain = nil;
 
@@ -401,7 +401,7 @@
 && [[nextStream path] isEqualToString:[[lastChain streamURL] path]]))
 {
 if ([lastChain setTrack:nextStream]
-&& [newChain openWithInput:[lastChain inputNode] withOutputFormatHint:[output format] withRGInfo:nextStreamRGInfo])
+&& [newChain openWithInput:[lastChain inputNode] withOutputFormat:[output format] withRGInfo:nextStreamRGInfo])
 {
 [newChain setStreamURL:nextStream];
 [newChain setUserInfo:nextStreamUserInfo];
@@ -418,7 +418,7 @@
 
 lastChain = nil;
 
-while (shouldContinue && ![newChain open:nextStream withOutputFormatHint:[output format] withRGInfo:nextStreamRGInfo])
+while (shouldContinue && ![newChain open:nextStream withOutputFormat:[output format] withRGInfo:nextStreamRGInfo])
 {
 if (nextStream == nil)
 {
@@ -30,14 +30,14 @@
 - (id)initWithController:(id)c;
 - (void)buildChain;
 
-- (BOOL)open:(NSURL *)url withOutputFormatHint:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary*)rgi;
+- (BOOL)open:(NSURL *)url withOutputFormat:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary*)rgi;
 
 //Used when changing tracks to reuse the same decoder
-- (BOOL)openWithInput:(InputNode *)i withOutputFormatHint:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary*)rgi;
+- (BOOL)openWithInput:(InputNode *)i withOutputFormat:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary*)rgi;
 
 //Used when resetting the decoder on seek
 - (BOOL)openWithDecoder:(id<CogDecoder>)decoder
-withOutputFormatHint:(AudioStreamBasicDescription)outputFormat
+withOutputFormat:(AudioStreamBasicDescription)outputFormat
 withRGInfo:(NSDictionary*)rgi;
 
 - (void)seek:(double)time;
@@ -43,7 +43,7 @@
 finalNode = converterNode;
 }
 
-- (BOOL)open:(NSURL *)url withOutputFormatHint:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary *)rgi
+- (BOOL)open:(NSURL *)url withOutputFormat:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary *)rgi
 {
 [self setStreamURL:url];
 
@@ -65,13 +65,7 @@
 
 NSDictionary * properties = [inputNode properties];
 
-inputFormat = [inputNode nodeFormat];
-
-outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
-outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
-outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
-
-if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]])
+if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties valueForKey:@"encoding"] isEqualToString:@"lossless"]])
 return NO;
 
 [self setRGInfo:rgi];
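Note on the restored call above: propertiesToASBD() builds the input AudioStreamBasicDescription straight from the decoder's properties dictionary, so the hand-rolled repacking of the output format that the reverted commit introduced is dropped. As a hedged sketch of what that removed arithmetic did (plain C; the field names are CoreAudio's ASBD fields, the helper name is illustrative):

// Illustrative only: derive the packed-size fields of an ASBD after changing its channel count.
static void repack_asbd(AudioStreamBasicDescription *fmt, UInt32 channels) {
    fmt->mChannelsPerFrame = channels;
    fmt->mBytesPerFrame = ((fmt->mBitsPerChannel + 7) / 8) * fmt->mChannelsPerFrame;
    fmt->mBytesPerPacket = fmt->mBytesPerFrame * fmt->mFramesPerPacket;
}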
@@ -81,7 +75,7 @@
 return YES;
 }
 
-- (BOOL)openWithInput:(InputNode *)i withOutputFormatHint:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary *)rgi
+- (BOOL)openWithInput:(InputNode *)i withOutputFormat:(AudioStreamBasicDescription)outputFormat withRGInfo:(NSDictionary *)rgi
 {
 DLog(@"New buffer chain!");
 [self buildChain];
@@ -92,14 +86,7 @@
 NSDictionary * properties = [inputNode properties];
 
 DLog(@"Input Properties: %@", properties);
-
-inputFormat = [inputNode nodeFormat];
-
-outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
-outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
-outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
-
-if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
+if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
 return NO;
 
 [self setRGInfo:rgi];
@@ -108,7 +95,7 @@
 }
 
 - (BOOL)openWithDecoder:(id<CogDecoder>)decoder
-withOutputFormatHint:(AudioStreamBasicDescription)outputFormat
+withOutputFormat:(AudioStreamBasicDescription)outputFormat
 withRGInfo:(NSDictionary*)rgi;
 {
 DLog(@"New buffer chain!");
@@ -120,14 +107,7 @@
 NSDictionary * properties = [inputNode properties];
 
 DLog(@"Input Properties: %@", properties);
-
-inputFormat = [inputNode nodeFormat];
-
-outputFormat.mChannelsPerFrame = inputFormat.mChannelsPerFrame;
-outputFormat.mBytesPerFrame = ((outputFormat.mBitsPerChannel + 7) / 8) * outputFormat.mChannelsPerFrame;
-outputFormat.mBytesPerPacket = outputFormat.mBytesPerFrame * outputFormat.mFramesPerPacket;
-
-if (![converterNode setupWithInputFormat:inputFormat outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
+if (![converterNode setupWithInputFormat:(inputFormat = propertiesToASBD(properties)) outputFormat:outputFormat isLossless:[[properties objectForKey:@"encoding"] isEqualToString:@"lossless"]])
 return NO;
 
 [self setRGInfo:rgi];
@@ -74,6 +74,8 @@
 id __weak originalPreviousNode;
 
 void *hdcd_decoder;
+
+HeadphoneFilter *hFilter;
 }
 
 @property AudioStreamBasicDescription inputFormat;
@@ -72,11 +72,189 @@ void PrintStreamDesc (AudioStreamBasicDescription *inDesc)
 hdcd_decoder = NULL;
 
 [[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.volumeScaling" options:0 context:nil];
+[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.headphoneVirtualization" options:0 context:nil];
+[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.hrirPath" options:0 context:nil];
 }
 
 return self;
 }
 
+static const float STEREO_DOWNMIX[8-2][8][2]={
+/*3.0*/
+{
+{0.5858F,0.0F},{0.0F,0.5858F},{0.4142F,0.4142F}
+},
+/*quadrophonic*/
+{
+{0.4226F,0.0F},{0.0F,0.4226F},{0.366F,0.2114F},{0.2114F,0.336F}
+},
+/*5.0*/
+{
+{0.651F,0.0F},{0.0F,0.651F},{0.46F,0.46F},{0.5636F,0.3254F},
+{0.3254F,0.5636F}
+},
+/*5.1*/
+{
+{0.529F,0.0F},{0.0F,0.529F},{0.3741F,0.3741F},{0.3741F,0.3741F},{0.4582F,0.2645F},
+{0.2645F,0.4582F}
+},
+/*6.1*/
+{
+{0.4553F,0.0F},{0.0F,0.4553F},{0.322F,0.322F},{0.322F,0.322F},{0.2788F,0.2788F},
+{0.3943F,0.2277F},{0.2277F,0.3943F}
+},
+/*7.1*/
+{
+{0.3886F,0.0F},{0.0F,0.3886F},{0.2748F,0.2748F},{0.2748F,0.2748F},{0.3366F,0.1943F},
+{0.1943F,0.3366F},{0.3366F,0.1943F},{0.1943F,0.3366F}
+}
+};
+
+static void downmix_to_stereo(float * buffer, int channels, size_t count)
+{
+if (channels >= 3 && channels <= 8)
+for (size_t i = 0; i < count; ++i)
+{
+float left = 0, right = 0;
+for (int j = 0; j < channels; ++j)
+{
+left += buffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][0];
+right += buffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][1];
+}
+buffer[i * 2 + 0] = left;
+buffer[i * 2 + 1] = right;
+}
+}
+
+static void downmix_to_mono(float * buffer, int channels, size_t count)
+{
+if (channels >= 3 && channels <= 8)
+{
+downmix_to_stereo(buffer, channels, count);
+channels = 2;
+}
+float invchannels = 1.0 / (float)channels;
+for (size_t i = 0; i < count; ++i)
+{
+float sample = 0;
+for (int j = 0; j < channels; ++j)
+{
+sample += buffer[i * channels + j];
+}
+buffer[i] = sample * invchannels;
+}
+}
+
+static void upmix(float * buffer, int inchannels, int outchannels, size_t count)
+{
+for (ssize_t i = count - 1; i >= 0; --i)
+{
+if (inchannels == 1 && outchannels == 2)
+{
+// upmix mono to stereo
+float sample = buffer[i];
+buffer[i * 2 + 0] = sample;
+buffer[i * 2 + 1] = sample;
+}
+else if (inchannels == 1 && outchannels == 4)
+{
+// upmix mono to quad
+float sample = buffer[i];
+buffer[i * 4 + 0] = sample;
+buffer[i * 4 + 1] = sample;
+buffer[i * 4 + 2] = 0;
+buffer[i * 4 + 3] = 0;
+}
+else if (inchannels == 1 && (outchannels == 3 || outchannels >= 5))
+{
+// upmix mono to center channel
+float sample = buffer[i];
+buffer[i * outchannels + 2] = sample;
+for (int j = 0; j < 2; ++j)
+{
+buffer[i * outchannels + j] = 0;
+}
+for (int j = 3; j < outchannels; ++j)
+{
+buffer[i * outchannels + j] = 0;
+}
+}
+else if (inchannels == 4 && outchannels >= 5)
+{
+float fl = buffer[i * 4 + 0];
+float fr = buffer[i * 4 + 1];
+float bl = buffer[i * 4 + 2];
+float br = buffer[i * 4 + 3];
+const int skipclfe = (outchannels == 5) ? 1 : 2;
+buffer[i * outchannels + 0] = fl;
+buffer[i * outchannels + 1] = fr;
+buffer[i * outchannels + skipclfe + 2] = bl;
+buffer[i * outchannels + skipclfe + 3] = br;
+for (int j = 0; j < skipclfe; ++j)
+{
+buffer[i * outchannels + 2 + j] = 0;
+}
+for (int j = 4 + skipclfe; j < outchannels; ++j)
+{
+buffer[i * outchannels + j] = 0;
+}
+}
+else if (inchannels == 5 && outchannels >= 6)
+{
+float fl = buffer[i * 5 + 0];
+float fr = buffer[i * 5 + 1];
+float c = buffer[i * 5 + 2];
+float bl = buffer[i * 5 + 3];
+float br = buffer[i * 5 + 4];
+buffer[i * outchannels + 0] = fl;
+buffer[i * outchannels + 1] = fr;
+buffer[i * outchannels + 2] = c;
+buffer[i * outchannels + 3] = 0;
+buffer[i * outchannels + 4] = bl;
+buffer[i * outchannels + 5] = br;
+for (int j = 6; j < outchannels; ++j)
+{
+buffer[i * outchannels + j] = 0;
+}
+}
+else if (inchannels == 7 && outchannels == 8)
+{
+float fl = buffer[i * 7 + 0];
+float fr = buffer[i * 7 + 1];
+float c = buffer[i * 7 + 2];
+float lfe = buffer[i * 7 + 3];
+float sl = buffer[i * 7 + 4];
+float sr = buffer[i * 7 + 5];
+float bc = buffer[i * 7 + 6];
+buffer[i * 8 + 0] = fl;
+buffer[i * 8 + 1] = fr;
+buffer[i * 8 + 2] = c;
+buffer[i * 8 + 3] = lfe;
+buffer[i * 8 + 4] = bc;
+buffer[i * 8 + 5] = bc;
+buffer[i * 8 + 6] = sl;
+buffer[i * 8 + 7] = sr;
+}
+else
+{
+// upmix N channels to N channels plus silence the empty channels
+float samples[inchannels];
+for (int j = 0; j < inchannels; ++j)
+{
+samples[j] = buffer[i * inchannels + j];
+}
+for (int j = 0; j < inchannels; ++j)
+{
+buffer[i * outchannels + j] = samples[j];
+}
+for (int j = inchannels; j < outchannels; ++j)
+{
+buffer[i * outchannels + j] = 0;
+}
+}
+}
+}
+
 void scale_by_volume(float * buffer, size_t count, float volume)
 {
 if ( volume != 1.0 )
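The restored helpers above work in place on interleaved 32-bit float samples: downmix_to_stereo and downmix_to_mono fold extra channels through the STEREO_DOWNMIX coefficients, while upmix walks the buffer backwards so frames can be widened without a scratch buffer. A hedged usage sketch (frame count and channel layout are illustrative):

// Illustrative only: fold 1024 frames of 5.1 (6-channel) interleaved float PCM down to stereo in place.
float pcm[1024 * 6]; // already filled with decoded samples
downmix_to_stereo(pcm, 6, 1024); // result occupies the first 1024 * 2 floats of pcm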
@@ -842,6 +1020,31 @@ tryagain:
 
 scale_by_volume( (float*) floatBuffer, amountReadFromFC / sizeof(float), volumeScale);
 
+if ( hFilter ) {
+int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
+[hFilter process:floatBuffer sampleCount:samples toBuffer:floatBuffer + amountReadFromFC];
+memmove(floatBuffer, floatBuffer + amountReadFromFC, samples * sizeof(float) * 2);
+amountReadFromFC = samples * sizeof(float) * 2;
+}
+else if ( inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2 )
+{
+int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
+downmix_to_stereo( (float*) floatBuffer, inputFormat.mChannelsPerFrame, samples );
+amountReadFromFC = samples * sizeof(float) * 2;
+}
+else if ( inputFormat.mChannelsPerFrame > 1 && outputFormat.mChannelsPerFrame == 1 )
+{
+int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
+downmix_to_mono( (float*) floatBuffer, inputFormat.mChannelsPerFrame, samples );
+amountReadFromFC = samples * sizeof(float);
+}
+else if ( inputFormat.mChannelsPerFrame < outputFormat.mChannelsPerFrame )
+{
+int samples = amountReadFromFC / floatFormat.mBytesPerFrame;
+upmix( (float*) floatBuffer, inputFormat.mChannelsPerFrame, outputFormat.mChannelsPerFrame, samples );
+amountReadFromFC = samples * sizeof(float) * outputFormat.mChannelsPerFrame;
+}
+
 floatSize = amountReadFromFC;
 floatOffset = 0;
 }
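Each restored branch above converts the byte count reported by the converter into the byte count of the produced layout by going through whole frames. As a hedged sketch of that arithmetic (variable names are illustrative):

// Illustrative only: bytes in -> frames -> bytes out for interleaved 32-bit float audio.
int samples = bytesIn / (inChannels * (int)sizeof(float));
int bytesOut = samples * outChannels * (int)sizeof(float); // e.g. outChannels == 2 after downmix_to_stereo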
@@ -872,6 +1075,15 @@ tryagain:
 //User reset the volume scaling option
 [self refreshVolumeScaling];
 }
+else if ([keyPath isEqualToString:@"values.headphoneVirtualization"] ||
+[keyPath isEqualToString:@"values.hrirPath"]) {
+// Reset the converter, without rebuffering
+if (outputFormat.mChannelsPerFrame == 2 &&
+inputFormat.mChannelsPerFrame >= 1 &&
+inputFormat.mChannelsPerFrame <= 8) {
+[self inputFormatDidChange:inputFormat];
+}
+}
 }
 
 static float db_to_scale(float db)
@@ -929,8 +1141,6 @@ static float db_to_scale(float db)
 inputFormat = inf;
 outputFormat = outf;
 
-nodeFormat = outputFormat;
-
 rememberedLossless = lossless;
 
 // These are the only sample formats we support translating
@@ -982,6 +1192,33 @@ static float db_to_scale(float db)
 dmFloatFormat.mBytesPerFrame = (32/8)*dmFloatFormat.mChannelsPerFrame;
 dmFloatFormat.mBytesPerPacket = dmFloatFormat.mBytesPerFrame * floatFormat.mFramesPerPacket;
 
+BOOL hVirt = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] boolForKey:@"headphoneVirtualization"];
+
+if (hVirt &&
+outputFormat.mChannelsPerFrame == 2 &&
+inputFormat.mChannelsPerFrame >= 1 &&
+inputFormat.mChannelsPerFrame <= 8) {
+NSString * userPreset = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] stringForKey:@"hrirPath"];
+
+NSURL * presetUrl = nil;
+
+if (userPreset && ![userPreset isEqualToString:@""]) {
+presetUrl = [NSURL fileURLWithPath:userPreset];
+if (![HeadphoneFilter validateImpulseFile:presetUrl])
+presetUrl = nil;
+}
+
+if (!presetUrl) {
+presetUrl = [[NSBundle mainBundle] URLForResource:@"gsx" withExtension:@"wv"];
+if (![HeadphoneFilter validateImpulseFile:presetUrl])
+presetUrl = nil;
+}
+
+if (presetUrl) {
+hFilter = [[HeadphoneFilter alloc] initWithImpulseFile:presetUrl forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame];
+}
+}
+
 skipResampler = outputFormat.mSampleRate == floatFormat.mSampleRate;
 
 sampleRatio = (double)outputFormat.mSampleRate / (double)floatFormat.mSampleRate;
@@ -1034,6 +1271,8 @@ static float db_to_scale(float db)
 DLog(@"Decoder dealloc");
 
 [[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.volumeScaling"];
+[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.headphoneVirtualization"];
+[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.hrirPath"];
 
 paused = NO;
 [self cleanUp];
@@ -1098,6 +1337,9 @@ static float db_to_scale(float db)
 {
 usleep(500);
 }
+if (hFilter) {
+hFilter = nil;
+}
 if (hdcd_decoder)
 {
 free(hdcd_decoder);
@@ -1,26 +0,0 @@
-//
-// Downmix.h
-// Cog
-//
-// Created by Christopher Snowhill on 2/05/22.
-// Copyright 2022 __LoSnoCo__. All rights reserved.
-//
-
-#import <Foundation/Foundation.h>
-#import <CoreAudio/CoreAudio.h>
-
-#import "HeadphoneFilter.h"
-
-@interface DownmixProcessor : NSObject {
-HeadphoneFilter *hFilter;
-
-AudioStreamBasicDescription inputFormat;
-AudioStreamBasicDescription outputFormat;
-}
-
-- (id) initWithInputFormat:(AudioStreamBasicDescription)inf andOutputFormat:(AudioStreamBasicDescription)outf;
-
-- (void) process:(const void*)inBuffer frameCount:(size_t)frames output:(void *)outBuffer;
-
-@end
-
@ -1,302 +0,0 @@
|
||||||
//
|
|
||||||
// Downmix.m
|
|
||||||
// Cog
|
|
||||||
//
|
|
||||||
// Created by Christopher Snowhill on 2/05/22.
|
|
||||||
// Copyright 2022 __LoSnoCo__. All rights reserved.
|
|
||||||
//
|
|
||||||
|
|
||||||
#import "Downmix.h"
|
|
||||||
|
|
||||||
#import "Logging.h"
|
|
||||||
|
|
||||||
static const float STEREO_DOWNMIX[8-2][8][2]={
|
|
||||||
/*3.0*/
|
|
||||||
{
|
|
||||||
{0.5858F,0.0F},{0.0F,0.5858F},{0.4142F,0.4142F}
|
|
||||||
},
|
|
||||||
/*quadrophonic*/
|
|
||||||
{
|
|
||||||
{0.4226F,0.0F},{0.0F,0.4226F},{0.366F,0.2114F},{0.2114F,0.336F}
|
|
||||||
},
|
|
||||||
/*5.0*/
|
|
||||||
{
|
|
||||||
{0.651F,0.0F},{0.0F,0.651F},{0.46F,0.46F},{0.5636F,0.3254F},
|
|
||||||
{0.3254F,0.5636F}
|
|
||||||
},
|
|
||||||
/*5.1*/
|
|
||||||
{
|
|
||||||
{0.529F,0.0F},{0.0F,0.529F},{0.3741F,0.3741F},{0.3741F,0.3741F},{0.4582F,0.2645F},
|
|
||||||
{0.2645F,0.4582F}
|
|
||||||
},
|
|
||||||
/*6.1*/
|
|
||||||
{
|
|
||||||
{0.4553F,0.0F},{0.0F,0.4553F},{0.322F,0.322F},{0.322F,0.322F},{0.2788F,0.2788F},
|
|
||||||
{0.3943F,0.2277F},{0.2277F,0.3943F}
|
|
||||||
},
|
|
||||||
/*7.1*/
|
|
||||||
{
|
|
||||||
{0.3886F,0.0F},{0.0F,0.3886F},{0.2748F,0.2748F},{0.2748F,0.2748F},{0.3366F,0.1943F},
|
|
||||||
{0.1943F,0.3366F},{0.3366F,0.1943F},{0.1943F,0.3366F}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
static void downmix_to_stereo(const float * inBuffer, int channels, float * outBuffer, size_t count)
|
|
||||||
{
|
|
||||||
if (channels >= 3 && channels <= 8)
|
|
||||||
for (size_t i = 0; i < count; ++i)
|
|
||||||
{
|
|
||||||
float left = 0, right = 0;
|
|
||||||
for (int j = 0; j < channels; ++j)
|
|
||||||
{
|
|
||||||
left += inBuffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][0];
|
|
||||||
right += inBuffer[i * channels + j] * STEREO_DOWNMIX[channels - 3][j][1];
|
|
||||||
}
|
|
||||||
outBuffer[i * 2 + 0] = left;
|
|
||||||
outBuffer[i * 2 + 1] = right;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void downmix_to_mono(const float * inBuffer, int channels, float * outBuffer, size_t count)
|
|
||||||
{
|
|
||||||
float tempBuffer[count * 2];
|
|
||||||
if (channels >= 3 && channels <= 8)
|
|
||||||
{
|
|
||||||
downmix_to_stereo(inBuffer, channels, tempBuffer, count);
|
|
||||||
inBuffer = tempBuffer;
|
|
||||||
channels = 2;
|
|
||||||
}
|
|
||||||
float invchannels = 1.0 / (float)channels;
|
|
||||||
for (size_t i = 0; i < count; ++i)
|
|
||||||
{
|
|
||||||
float sample = 0;
|
|
||||||
for (int j = 0; j < channels; ++j)
|
|
||||||
{
|
|
||||||
sample += inBuffer[i * channels + j];
|
|
||||||
}
|
|
||||||
outBuffer[i] = sample * invchannels;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void upmix(const float * inBuffer, int inchannels, float * outBuffer, int outchannels, size_t count)
|
|
||||||
{
|
|
||||||
for (ssize_t i = 0; i < count; ++i)
|
|
||||||
{
|
|
||||||
if (inchannels == 1 && outchannels == 2)
|
|
||||||
{
|
|
||||||
// upmix mono to stereo
|
|
||||||
float sample = inBuffer[i];
|
|
||||||
outBuffer[i * 2 + 0] = sample;
|
|
||||||
outBuffer[i * 2 + 1] = sample;
|
|
||||||
}
|
|
||||||
else if (inchannels == 1 && outchannels == 4)
|
|
||||||
{
|
|
||||||
// upmix mono to quad
|
|
||||||
float sample = inBuffer[i];
|
|
||||||
outBuffer[i * 4 + 0] = sample;
|
|
||||||
outBuffer[i * 4 + 1] = sample;
|
|
||||||
outBuffer[i * 4 + 2] = 0;
|
|
||||||
outBuffer[i * 4 + 3] = 0;
|
|
||||||
}
|
|
||||||
else if (inchannels == 1 && (outchannels == 3 || outchannels >= 5))
|
|
||||||
{
|
|
||||||
// upmix mono to center channel
|
|
||||||
float sample = inBuffer[i];
|
|
||||||
outBuffer[i * outchannels + 2] = sample;
|
|
||||||
for (int j = 0; j < 2; ++j)
|
|
||||||
{
|
|
||||||
outBuffer[i * outchannels + j] = 0;
|
|
||||||
}
|
|
||||||
for (int j = 3; j < outchannels; ++j)
|
|
||||||
{
|
|
||||||
outBuffer[i * outchannels + j] = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else if (inchannels == 4 && outchannels >= 5)
|
|
||||||
{
|
|
||||||
float fl = inBuffer[i * 4 + 0];
|
|
||||||
float fr = inBuffer[i * 4 + 1];
|
|
||||||
float bl = inBuffer[i * 4 + 2];
|
|
||||||
float br = inBuffer[i * 4 + 3];
|
|
||||||
const int skipclfe = (outchannels == 5) ? 1 : 2;
|
|
||||||
outBuffer[i * outchannels + 0] = fl;
|
|
||||||
outBuffer[i * outchannels + 1] = fr;
|
|
||||||
outBuffer[i * outchannels + skipclfe + 2] = bl;
|
|
||||||
outBuffer[i * outchannels + skipclfe + 3] = br;
|
|
||||||
for (int j = 0; j < skipclfe; ++j)
|
|
||||||
{
|
|
||||||
outBuffer[i * outchannels + 2 + j] = 0;
|
|
||||||
}
|
|
||||||
for (int j = 4 + skipclfe; j < outchannels; ++j)
|
|
||||||
{
|
|
||||||
outBuffer[i * outchannels + j] = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else if (inchannels == 5 && outchannels >= 6)
|
|
||||||
{
|
|
||||||
float fl = inBuffer[i * 5 + 0];
|
|
||||||
float fr = inBuffer[i * 5 + 1];
|
|
||||||
float c = inBuffer[i * 5 + 2];
|
|
||||||
float bl = inBuffer[i * 5 + 3];
|
|
||||||
float br = inBuffer[i * 5 + 4];
|
|
||||||
outBuffer[i * outchannels + 0] = fl;
|
|
||||||
outBuffer[i * outchannels + 1] = fr;
|
|
||||||
outBuffer[i * outchannels + 2] = c;
|
|
||||||
outBuffer[i * outchannels + 3] = 0;
|
|
||||||
outBuffer[i * outchannels + 4] = bl;
|
|
||||||
outBuffer[i * outchannels + 5] = br;
|
|
||||||
for (int j = 6; j < outchannels; ++j)
|
|
||||||
{
|
|
||||||
outBuffer[i * outchannels + j] = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
else if (inchannels == 7 && outchannels == 8)
|
|
||||||
{
|
|
||||||
float fl = inBuffer[i * 7 + 0];
|
|
||||||
float fr = inBuffer[i * 7 + 1];
|
|
||||||
float c = inBuffer[i * 7 + 2];
|
|
||||||
float lfe = inBuffer[i * 7 + 3];
|
|
||||||
float sl = inBuffer[i * 7 + 4];
|
|
||||||
float sr = inBuffer[i * 7 + 5];
|
|
||||||
float bc = inBuffer[i * 7 + 6];
|
|
||||||
outBuffer[i * 8 + 0] = fl;
|
|
||||||
outBuffer[i * 8 + 1] = fr;
|
|
||||||
outBuffer[i * 8 + 2] = c;
|
|
||||||
outBuffer[i * 8 + 3] = lfe;
|
|
||||||
outBuffer[i * 8 + 4] = bc;
|
|
||||||
outBuffer[i * 8 + 5] = bc;
|
|
||||||
outBuffer[i * 8 + 6] = sl;
|
|
||||||
outBuffer[i * 8 + 7] = sr;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
// upmix N channels to N channels plus silence the empty channels
|
|
||||||
float samples[inchannels];
|
|
||||||
for (int j = 0; j < inchannels; ++j)
|
|
||||||
{
|
|
||||||
samples[j] = inBuffer[i * inchannels + j];
|
|
||||||
}
|
|
||||||
for (int j = 0; j < inchannels; ++j)
|
|
||||||
{
|
|
||||||
outBuffer[i * outchannels + j] = samples[j];
|
|
||||||
}
|
|
||||||
for (int j = inchannels; j < outchannels; ++j)
|
|
||||||
{
|
|
||||||
outBuffer[i * outchannels + j] = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@implementation DownmixProcessor
|
|
||||||
|
|
||||||
- (id) initWithInputFormat:(AudioStreamBasicDescription)inf andOutputFormat:(AudioStreamBasicDescription)outf {
|
|
||||||
self = [super init];
|
|
||||||
|
|
||||||
if (self) {
|
|
||||||
if (inf.mFormatID != kAudioFormatLinearPCM ||
|
|
||||||
(inf.mFormatFlags & kAudioFormatFlagsNativeFloatPacked) != kAudioFormatFlagsNativeFloatPacked ||
|
|
||||||
inf.mBitsPerChannel != 32 ||
|
|
||||||
inf.mBytesPerFrame != (4 * inf.mChannelsPerFrame) ||
|
|
||||||
inf.mBytesPerPacket != inf.mFramesPerPacket * inf.mBytesPerFrame)
|
|
||||||
return nil;
|
|
||||||
|
|
||||||
if (outf.mFormatID != kAudioFormatLinearPCM ||
|
|
||||||
(outf.mFormatFlags & kAudioFormatFlagsNativeFloatPacked) != kAudioFormatFlagsNativeFloatPacked ||
|
|
||||||
outf.mBitsPerChannel != 32 ||
|
|
||||||
outf.mBytesPerFrame != (4 * outf.mChannelsPerFrame) ||
|
|
||||||
outf.mBytesPerPacket != outf.mFramesPerPacket * outf.mBytesPerFrame)
|
|
||||||
return nil;
|
|
||||||
|
|
||||||
inputFormat = inf;
|
|
||||||
outputFormat = outf;
|
|
||||||
|
|
||||||
[self setupVirt];
|
|
||||||
|
|
||||||
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.headphoneVirtualization" options:0 context:nil];
|
|
||||||
[[NSUserDefaultsController sharedUserDefaultsController] addObserver:self forKeyPath:@"values.hrirPath" options:0 context:nil];
|
|
||||||
}
|
|
||||||
|
|
||||||
return self;
|
|
||||||
}
|
|
||||||
|
|
||||||
- (void) dealloc {
|
|
||||||
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.headphoneVirtualization"];
|
|
||||||
[[NSUserDefaultsController sharedUserDefaultsController] removeObserver:self forKeyPath:@"values.hrirPath"];
|
|
||||||
}
|
|
||||||
|
|
||||||
- (void) setupVirt {
|
|
||||||
@synchronized(hFilter) {
|
|
||||||
hFilter = nil;
|
|
||||||
}
|
|
||||||
|
|
||||||
BOOL hVirt = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] boolForKey:@"headphoneVirtualization"];
|
|
||||||
|
|
||||||
if (hVirt &&
|
|
||||||
outputFormat.mChannelsPerFrame == 2 &&
|
|
||||||
inputFormat.mChannelsPerFrame >= 1 &&
|
|
||||||
inputFormat.mChannelsPerFrame <= 8) {
|
|
||||||
NSString * userPreset = [[[NSUserDefaultsController sharedUserDefaultsController] defaults] stringForKey:@"hrirPath"];
|
|
||||||
|
|
||||||
NSURL * presetUrl = nil;
|
|
||||||
|
|
||||||
if (userPreset && ![userPreset isEqualToString:@""]) {
|
|
||||||
presetUrl = [NSURL fileURLWithPath:userPreset];
|
|
||||||
if (![HeadphoneFilter validateImpulseFile:presetUrl])
|
|
||||||
presetUrl = nil;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!presetUrl) {
|
|
||||||
presetUrl = [[NSBundle mainBundle] URLForResource:@"gsx" withExtension:@"wv"];
|
|
||||||
if (![HeadphoneFilter validateImpulseFile:presetUrl])
|
|
||||||
presetUrl = nil;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (presetUrl) {
|
|
||||||
@synchronized(hFilter) {
|
|
||||||
hFilter = [[HeadphoneFilter alloc] initWithImpulseFile:presetUrl forSampleRate:outputFormat.mSampleRate withInputChannels:inputFormat.mChannelsPerFrame];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
- (void)observeValueForKeyPath:(NSString *)keyPath
|
|
||||||
ofObject:(id)object
|
|
||||||
change:(NSDictionary *)change
|
|
||||||
context:(void *)context
|
|
||||||
{
|
|
||||||
DLog(@"SOMETHING CHANGED!");
|
|
||||||
if ([keyPath isEqualToString:@"values.headphoneVirtualization"] ||
|
|
||||||
[keyPath isEqualToString:@"values.hrirPath"]) {
|
|
||||||
// Reset the converter, without rebuffering
|
|
||||||
[self setupVirt];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
- (void) process:(const void *)inBuffer frameCount:(size_t)frames output:(void *)outBuffer {
|
|
||||||
@synchronized (hFilter) {
|
|
||||||
if ( hFilter ) {
|
|
||||||
[hFilter process:(const float *) inBuffer sampleCount:frames toBuffer:(float *) outBuffer];
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ( inputFormat.mChannelsPerFrame > 2 && outputFormat.mChannelsPerFrame == 2 )
|
|
||||||
{
|
|
||||||
downmix_to_stereo( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, frames );
|
|
||||||
}
|
|
||||||
else if ( inputFormat.mChannelsPerFrame > 1 && outputFormat.mChannelsPerFrame == 1 )
|
|
||||||
{
|
|
||||||
downmix_to_mono( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, frames );
|
|
||||||
}
|
|
||||||
else if ( inputFormat.mChannelsPerFrame < outputFormat.mChannelsPerFrame )
|
|
||||||
{
|
|
||||||
upmix( (const float*) inBuffer, inputFormat.mChannelsPerFrame, (float*) outBuffer, outputFormat.mChannelsPerFrame, frames );
|
|
||||||
}
|
|
||||||
else if ( inputFormat.mChannelsPerFrame == outputFormat.mChannelsPerFrame )
|
|
||||||
{
|
|
||||||
memcpy(outBuffer, inBuffer, frames * outputFormat.mBytesPerPacket);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
@end
|
|
|
@@ -51,8 +51,6 @@
 
 bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
 
-nodeFormat = propertiesToASBD(properties);
-
 shouldContinue = YES;
 shouldSeek = NO;
 
@@ -70,8 +68,6 @@
 
 bytesPerFrame = ((bitsPerSample + 7) / 8) * channels;
 
-nodeFormat = propertiesToASBD(properties);
-
 [self registerObservers];
 
 shouldContinue = YES;
@@ -7,7 +7,6 @@
 //
 
 #import <Cocoa/Cocoa.h>
-#import <CoreAudio/CoreAudio.h>
 #import "VirtualRingBuffer.h"
 #import "Semaphore.h"
 
@@ -26,12 +25,7 @@
 BOOL shouldContinue;
 BOOL endOfStream; //All data is now in buffer
 BOOL initialBufferFilled;
-
-AudioStreamBasicDescription nodeFormat;
 }
 
-@property (readonly) AudioStreamBasicDescription nodeFormat;
-
 - (id)initWithController:(id)c previous:(id)p;
 
 - (int)writeData:(void *)ptr amount:(int)a;
@@ -13,8 +13,6 @@
 
 @implementation Node
 
-@synthesize nodeFormat;
-
 - (id)initWithController:(id)c previous:(id)p
 {
 self = [super init];
@@ -43,8 +41,7 @@
 
 while (shouldContinue == YES && amountLeft > 0)
 {
-BOOL wrapped;
-availOutput = [buffer lengthAvailableToWriteReturningPointer:&writePtr bufferWrapped:&wrapped];
+availOutput = [buffer lengthAvailableToWriteReturningPointer:&writePtr];
 if (availOutput == 0) {
 if (initialBufferFilled == NO) {
 initialBufferFilled = YES;
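With the bufferWrapped: variant gone, the restored producer loop above is the plain VirtualRingBuffer pattern: ask for a writable region, copy into it, then commit. A minimal hedged sketch of that shape (the copy step is illustrative):

// Illustrative only: shape of the restored ring-buffer write path.
void *writePtr;
int avail = [buffer lengthAvailableToWriteReturningPointer:&writePtr];
if (avail > 0) {
    int written = MIN(avail, amountLeft); // copy `written` bytes into writePtr here
    [buffer didWriteLength:written];
}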
@@ -24,9 +24,6 @@
 
 BOOL paused;
 BOOL started;
-
-BOOL formatSetup;
-BOOL formatChanged;
 }
 
 - (void)beginEqualizer:(AudioUnit)eq;
|
||||||
|
|
||||||
- (void)sustainHDCD;
|
- (void)sustainHDCD;
|
||||||
|
|
||||||
- (BOOL)formatChanged;
|
|
||||||
|
|
||||||
@end
|
@end
|
||||||
|
|
|
@@ -23,9 +23,6 @@
 paused = YES;
 started = NO;
-
-formatSetup = NO;
-formatChanged = NO;
 
 output = [[OutputCoreAudio alloc] initWithController:self];
 
 [output setup];
@@ -131,14 +128,6 @@
 if (oldSampleRatio)
 amountPlayed += oldSampleRatio * [[converter buffer] bufferedLength];
 #endif
-AudioStreamBasicDescription inf = [bufferChain inputFormat];
-
-format.mChannelsPerFrame = inf.mChannelsPerFrame;
-format.mBytesPerFrame = ((format.mBitsPerChannel + 7) / 8) * format.mChannelsPerFrame;
-format.mBytesPerPacket = format.mBytesPerFrame * format.mFramesPerPacket;
-
-sampleRatio = 1.0 / (format.mSampleRate * format.mBytesPerPacket);
-
 [converter setOutputFormat:format];
 [converter inputFormatDidChange:[bufferChain inputFormat]];
 }
@@ -189,21 +178,4 @@
 [output sustainHDCD];
 }
 
-- (BOOL)formatChanged
-{
-[self setPreviousNode:[[controller bufferChain] finalNode]];
-
-AudioStreamBasicDescription inf = [[self previousNode] nodeFormat];
-
-if (!formatSetup || memcmp(&nodeFormat, &inf, sizeof(nodeFormat)) != 0) {
-nodeFormat = inf;
-formatSetup = YES;
-formatChanged = YES;
-}
-
-BOOL copyFormatChanged = formatChanged;
-formatChanged = NO;
-return copyFormatChanged;
-}
-
 @end
@ -59,8 +59,6 @@
|
||||||
83725A8E27AA0DE60003F694 /* libsoxr.0.dylib in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83725A8A27AA0DBF0003F694 /* libsoxr.0.dylib */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; };
|
83725A8E27AA0DE60003F694 /* libsoxr.0.dylib in CopyFiles */ = {isa = PBXBuildFile; fileRef = 83725A8A27AA0DBF0003F694 /* libsoxr.0.dylib */; settings = {ATTRIBUTES = (CodeSignOnCopy, ); }; };
|
||||||
83725A9027AA16C90003F694 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83725A7B27AA0D8A0003F694 /* Accelerate.framework */; };
|
83725A9027AA16C90003F694 /* Accelerate.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83725A7B27AA0D8A0003F694 /* Accelerate.framework */; };
|
||||||
83725A9127AA16D50003F694 /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83725A7C27AA0D8E0003F694 /* AVFoundation.framework */; };
|
83725A9127AA16D50003F694 /* AVFoundation.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 83725A7C27AA0D8E0003F694 /* AVFoundation.framework */; };
|
||||||
8380F2D927AE6053009183C1 /* Downmix.m in Sources */ = {isa = PBXBuildFile; fileRef = 8380F2D727AE6053009183C1 /* Downmix.m */; };
|
|
||||||
8380F2DA27AE6053009183C1 /* Downmix.h in Headers */ = {isa = PBXBuildFile; fileRef = 8380F2D827AE6053009183C1 /* Downmix.h */; };
|
|
||||||
8384912718080FF100E7332D /* Logging.h in Headers */ = {isa = PBXBuildFile; fileRef = 8384912618080FF100E7332D /* Logging.h */; };
|
8384912718080FF100E7332D /* Logging.h in Headers */ = {isa = PBXBuildFile; fileRef = 8384912618080FF100E7332D /* Logging.h */; };
|
||||||
839366671815923C006DD712 /* CogPluginMulti.h in Headers */ = {isa = PBXBuildFile; fileRef = 839366651815923C006DD712 /* CogPluginMulti.h */; };
|
839366671815923C006DD712 /* CogPluginMulti.h in Headers */ = {isa = PBXBuildFile; fileRef = 839366651815923C006DD712 /* CogPluginMulti.h */; };
|
||||||
839366681815923C006DD712 /* CogPluginMulti.m in Sources */ = {isa = PBXBuildFile; fileRef = 839366661815923C006DD712 /* CogPluginMulti.m */; };
|
839366681815923C006DD712 /* CogPluginMulti.m in Sources */ = {isa = PBXBuildFile; fileRef = 839366661815923C006DD712 /* CogPluginMulti.m */; };
|
||||||
|
@ -153,8 +151,6 @@
|
||||||
83725A7C27AA0D8E0003F694 /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = System/Library/Frameworks/AVFoundation.framework; sourceTree = SDKROOT; };
|
83725A7C27AA0D8E0003F694 /* AVFoundation.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = AVFoundation.framework; path = System/Library/Frameworks/AVFoundation.framework; sourceTree = SDKROOT; };
|
||||||
83725A8827AA0DBF0003F694 /* soxr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = soxr.h; sourceTree = "<group>"; };
|
83725A8827AA0DBF0003F694 /* soxr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = soxr.h; sourceTree = "<group>"; };
|
||||||
83725A8A27AA0DBF0003F694 /* libsoxr.0.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; path = libsoxr.0.dylib; sourceTree = "<group>"; };
|
83725A8A27AA0DBF0003F694 /* libsoxr.0.dylib */ = {isa = PBXFileReference; lastKnownFileType = "compiled.mach-o.dylib"; path = libsoxr.0.dylib; sourceTree = "<group>"; };
|
||||||
8380F2D727AE6053009183C1 /* Downmix.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = Downmix.m; sourceTree = "<group>"; };
|
|
||||||
8380F2D827AE6053009183C1 /* Downmix.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Downmix.h; sourceTree = "<group>"; };
|
|
||||||
8384912618080FF100E7332D /* Logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Logging.h; path = ../../Utils/Logging.h; sourceTree = "<group>"; };
|
8384912618080FF100E7332D /* Logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Logging.h; path = ../../Utils/Logging.h; sourceTree = "<group>"; };
|
||||||
839366651815923C006DD712 /* CogPluginMulti.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CogPluginMulti.h; sourceTree = "<group>"; };
|
839366651815923C006DD712 /* CogPluginMulti.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CogPluginMulti.h; sourceTree = "<group>"; };
|
||||||
839366661815923C006DD712 /* CogPluginMulti.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CogPluginMulti.m; sourceTree = "<group>"; };
|
839366661815923C006DD712 /* CogPluginMulti.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = CogPluginMulti.m; sourceTree = "<group>"; };
|
||||||
|
@ -288,8 +284,6 @@
|
||||||
17D21C750B8BE4BA00D1EBDE /* Chain */ = {
|
17D21C750B8BE4BA00D1EBDE /* Chain */ = {
|
||||||
isa = PBXGroup;
|
isa = PBXGroup;
|
||||||
children = (
|
children = (
|
||||||
8380F2D827AE6053009183C1 /* Downmix.h */,
|
|
||||||
8380F2D727AE6053009183C1 /* Downmix.m */,
|
|
||||||
83A44A00279119B50049B6E2 /* RefillNode.h */,
|
83A44A00279119B50049B6E2 /* RefillNode.h */,
|
||||||
83A449FF279119B50049B6E2 /* RefillNode.m */,
|
83A449FF279119B50049B6E2 /* RefillNode.m */,
|
||||||
17D21C760B8BE4BA00D1EBDE /* BufferChain.h */,
|
17D21C760B8BE4BA00D1EBDE /* BufferChain.h */,
|
||||||
|
@ -452,7 +446,6 @@
|
||||||
8347C7412796C58800FA8A7D /* NSFileHandle+CreateFile.h in Headers */,
|
8347C7412796C58800FA8A7D /* NSFileHandle+CreateFile.h in Headers */,
|
||||||
17C940230B900909008627D6 /* AudioMetadataReader.h in Headers */,
|
17C940230B900909008627D6 /* AudioMetadataReader.h in Headers */,
|
||||||
83725A8B27AA0DBF0003F694 /* soxr.h in Headers */,
|
83725A8B27AA0DBF0003F694 /* soxr.h in Headers */,
|
||||||
8380F2DA27AE6053009183C1 /* Downmix.h in Headers */,
|
|
||||||
17B619300B909BC300BC003F /* AudioPropertiesReader.h in Headers */,
|
17B619300B909BC300BC003F /* AudioPropertiesReader.h in Headers */,
|
||||||
835EDD7D279FE307001EDCCE /* HeadphoneFilter.h in Headers */,
|
835EDD7D279FE307001EDCCE /* HeadphoneFilter.h in Headers */,
|
||||||
839366671815923C006DD712 /* CogPluginMulti.h in Headers */,
|
839366671815923C006DD712 /* CogPluginMulti.h in Headers */,
|
||||||
|
@ -553,7 +546,6 @@
|
||||||
8347C7422796C58800FA8A7D /* NSFileHandle+CreateFile.m in Sources */,
|
8347C7422796C58800FA8A7D /* NSFileHandle+CreateFile.m in Sources */,
|
||||||
17D21DC80B8BE79700D1EBDE /* CoreAudioUtils.m in Sources */,
|
17D21DC80B8BE79700D1EBDE /* CoreAudioUtils.m in Sources */,
|
||||||
839366681815923C006DD712 /* CogPluginMulti.m in Sources */,
|
839366681815923C006DD712 /* CogPluginMulti.m in Sources */,
|
||||||
8380F2D927AE6053009183C1 /* Downmix.m in Sources */,
|
|
||||||
835C88AA2797D4D400E28EAE /* lpc.c in Sources */,
|
835C88AA2797D4D400E28EAE /* lpc.c in Sources */,
|
||||||
17D21EBE0B8BF44000D1EBDE /* AudioPlayer.m in Sources */,
|
17D21EBE0B8BF44000D1EBDE /* AudioPlayer.m in Sources */,
|
||||||
17F94DD60B8D0F7000A34E87 /* PluginController.m in Sources */,
|
17F94DD60B8D0F7000A34E87 /* PluginController.m in Sources */,
|
||||||
|
|
|
@@ -19,8 +19,6 @@
 
 #import "Semaphore.h"
 
-#import "Downmix.h"
-
 //#define OUTPUT_LOG
 #ifdef OUTPUT_LOG
 #import <stdio.h>
@@ -44,9 +42,6 @@
 
 BOOL eqEnabled;
 
-BOOL streamFormatSetup;
-
-atomic_long bytesBuffered;
 atomic_long bytesRendered;
 atomic_long bytesHdcdSustained;
 
@@ -59,19 +54,12 @@
 
 AudioDeviceID outputDeviceID;
 AudioStreamBasicDescription deviceFormat; // info about the default device
-AudioStreamBasicDescription outerStreamFormat; // this is set when the buffer changes
-AudioStreamBasicDescription streamFormat; // this is set when the output callback gets it
 
 AUAudioUnit *_au;
 size_t _bufferSize;
 
 AudioUnit _eq;
-
-DownmixProcessor *downmixer;
-
-void * savedBuffer;
-size_t savedSize;
-size_t savedMaxSize;
 
 #ifdef OUTPUT_LOG
 FILE *_logFile;
 #endif
 
@ -83,132 +83,45 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (_self->savedSize) {
|
void * readPtr;
|
||||||
int readBytes = (int) _self->savedSize;
|
int toRead = [[_self->outputController buffer] lengthAvailableToReadReturningPointer:&readPtr];
|
||||||
|
|
||||||
const int streamchannels = _self->streamFormat.mChannelsPerFrame;
|
if (toRead > amountToRead)
|
||||||
const int streamBytesPerPacket = streamchannels * sizeof(float);
|
toRead = amountToRead;
|
||||||
|
|
||||||
int samplesToRead = readBytes / streamBytesPerPacket;
|
if (toRead) {
|
||||||
|
fillBuffers(ioData, (float*)readPtr, toRead / bytesPerPacket, 0);
|
||||||
if (samplesToRead > (amountToRead / bytesPerPacket))
|
amountRead = toRead;
|
||||||
samplesToRead = amountToRead / bytesPerPacket;
|
[[_self->outputController buffer] didReadLength:toRead];
|
||||||
|
[_self->outputController incrementAmountPlayed:amountRead];
|
||||||
readBytes = samplesToRead * streamBytesPerPacket;
|
atomic_fetch_add(&_self->bytesRendered, amountRead);
|
||||||
|
|
||||||
atomic_fetch_sub(&_self->bytesBuffered, readBytes);
|
|
||||||
|
|
||||||
float downmixBuffer[samplesToRead * channels];
|
|
||||||
[_self->downmixer process:_self->savedBuffer frameCount:samplesToRead output:downmixBuffer];
|
|
||||||
fillBuffers(ioData, downmixBuffer, samplesToRead, 0);
|
|
||||||
amountRead += samplesToRead * bytesPerPacket;
|
|
||||||
[_self->outputController incrementAmountPlayed:samplesToRead * bytesPerPacket];
|
|
||||||
atomic_fetch_add(&_self->bytesRendered, samplesToRead * bytesPerPacket);
|
|
||||||
[_self->writeSemaphore signal];
|
[_self->writeSemaphore signal];
|
||||||
|
|
||||||
if (_self->savedSize > readBytes) {
|
|
||||||
_self->savedSize -= readBytes;
|
|
||||||
memmove(_self->savedBuffer, _self->savedBuffer + readBytes, _self->savedSize);
|
|
||||||
}
|
|
||||||
else {
|
|
||||||
_self->savedSize = 0;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
else
|
||||||
|
[[_self->outputController buffer] didReadLength:0];
|
||||||
|
|
||||||
while (amountRead < amountToRead && [_self->outputController shouldContinue])
|
// Try repeatedly! Buffer wraps can cause a slight data shortage, as can
|
||||||
|
// unexpected track changes.
|
||||||
|
while ((amountRead < amountToRead) && [_self->outputController shouldContinue] == YES)
|
||||||
{
|
{
|
||||||
void * readPtr;
|
int amountRead2; //Use this since return type of readdata isnt known...may want to fix then can do a simple += to readdata
|
||||||
int toRead = 0;
|
amountRead2 = [[_self->outputController buffer] lengthAvailableToReadReturningPointer:&readPtr];
|
||||||
do {
|
if (amountRead2 > (amountToRead - amountRead))
|
||||||
toRead = [[_self->outputController buffer] lengthAvailableToReadReturningPointer:&readPtr];
|
amountRead2 = amountToRead - amountRead;
|
||||||
if (toRead && *((uint8_t*)readPtr) == 0xFF) {
|
if (amountRead2) {
|
||||||
size_t toSkip = 0;
|
atomic_fetch_add(&_self->bytesRendered, amountRead2);
|
||||||
while (toRead && *((uint8_t*)readPtr) == 0xFF) {
|
fillBuffers(ioData, (float*)readPtr, amountRead2 / bytesPerPacket, amountRead / bytesPerPacket);
|
||||||
toSkip++;
|
[[_self->outputController buffer] didReadLength:amountRead2];
|
||||||
readPtr++;
|
|
||||||
toRead--;
|
|
||||||
}
|
|
||||||
[[_self->outputController buffer] didReadLength:(int)toSkip];
|
|
||||||
toRead = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
while (!toRead);
|
|
||||||
|
|
||||||
int bytesRead = 0;
|
[_self->outputController incrementAmountPlayed:amountRead2];
|
||||||
|
|
||||||
int32_t chunkId = -1;
|
amountRead += amountRead2;
|
||||||
|
|
||||||
if (toRead >= 4) {
|
|
||||||
memcpy(&chunkId, readPtr, 4);
|
|
||||||
readPtr += 4;
|
|
||||||
toRead -= 4;
|
|
||||||
bytesRead += 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (chunkId == 1 && toRead >= sizeof(AudioStreamBasicDescription)) {
|
|
||||||
AudioStreamBasicDescription inf;
|
|
||||||
memcpy(&inf, readPtr, sizeof(inf));
|
|
||||||
readPtr += sizeof(inf);
|
|
||||||
toRead -= sizeof(inf);
|
|
||||||
bytesRead += sizeof(inf);
|
|
||||||
|
|
||||||
if (!_self->streamFormatSetup || memcmp(&inf, &_self->streamFormat, sizeof(inf)) != 0) {
|
|
||||||
_self->streamFormatSetup = YES;
|
|
||||||
_self->streamFormat = inf;
|
|
||||||
_self->downmixer = [[DownmixProcessor alloc] initWithInputFormat:inf andOutputFormat:_self->deviceFormat];
|
|
||||||
}
|
|
||||||
|
|
||||||
if (toRead >= 4) {
|
|
||||||
memcpy(&chunkId, readPtr, 4);
|
|
||||||
readPtr += 4;
|
|
||||||
toRead -= 4;
|
|
||||||
bytesRead += 4;
|
|
||||||
}
|
|
||||||
else chunkId = -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
const int streamchannels = _self->streamFormat.mChannelsPerFrame;
|
|
||||||
const int streamBytesPerPacket = streamchannels * sizeof(float);
|
|
||||||
|
|
||||||
if (chunkId == 0 && toRead >= 4) {
|
|
||||||
memcpy(&chunkId, readPtr, 4);
|
|
||||||
readPtr += 4;
|
|
||||||
bytesRead += 4;
|
|
||||||
toRead = chunkId;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (toRead) {
|
|
||||||
size_t samplesToRead = toRead / streamBytesPerPacket;
|
|
||||||
size_t saveBytes = 0;
|
|
||||||
|
|
||||||
if (samplesToRead * bytesPerPacket > (amountToRead - amountRead)) {
|
|
||||||
size_t shortToRead = (amountToRead - amountRead) / bytesPerPacket;
|
|
||||||
saveBytes = (samplesToRead - shortToRead) * streamBytesPerPacket;
|
|
||||||
samplesToRead = shortToRead;
|
|
||||||
}
|
|
||||||
float downmixBuffer[samplesToRead * channels];
|
|
||||||
[_self->downmixer process:readPtr frameCount:samplesToRead output:downmixBuffer];
|
|
||||||
fillBuffers(ioData, downmixBuffer, samplesToRead, amountRead / bytesPerPacket);
|
|
||||||
amountRead += samplesToRead * bytesPerPacket;
|
|
||||||
bytesRead += toRead;
|
|
||||||
|
|
||||||
if (saveBytes) {
|
|
||||||
if (!_self->savedBuffer || _self->savedMaxSize < saveBytes) {
|
|
||||||
_self->savedBuffer = realloc(_self->savedBuffer, _self->savedMaxSize = saveBytes * 3);
|
|
||||||
}
|
|
||||||
_self->savedSize = saveBytes;
|
|
||||||
memcpy(_self->savedBuffer, readPtr + toRead - saveBytes, saveBytes);
|
|
||||||
}
|
|
||||||
|
|
||||||
atomic_fetch_sub(&_self->bytesBuffered, toRead - saveBytes);
|
|
||||||
|
|
||||||
[[_self->outputController buffer] didReadLength:bytesRead];
|
|
||||||
[_self->outputController incrementAmountPlayed:samplesToRead * bytesPerPacket];
|
|
||||||
atomic_fetch_add(&_self->bytesRendered, samplesToRead * bytesPerPacket);
|
|
||||||
[_self->writeSemaphore signal];
|
[_self->writeSemaphore signal];
|
||||||
}
|
}
|
||||||
else
|
else {
|
||||||
[[_self->outputController buffer] didReadLength:bytesRead];
|
[[_self->outputController buffer] didReadLength:0];
|
||||||
|
[_self->readSemaphore timedWait:500];
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
float volumeScale = 1.0;
|
float volumeScale = 1.0;
|
||||||
|
@ -225,6 +138,14 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
|
||||||
|
|
||||||
scaleBuffersByVolume(ioData, _self->volume * volumeScale);
|
scaleBuffersByVolume(ioData, _self->volume * volumeScale);
|
||||||
|
|
||||||
|
if (amountRead < amountToRead)
|
||||||
|
{
|
||||||
|
// Either underrun, or no data at all. Caller output tends to just
|
||||||
|
// buffer loop if it doesn't get anything, so always produce a full
|
||||||
|
// buffer, and silence anything we couldn't supply.
|
||||||
|
clearBuffers(ioData, (amountToRead - amountRead) / bytesPerPacket, amountRead / bytesPerPacket);
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -244,15 +165,6 @@ static OSStatus renderCallback( void *inRefCon, AudioUnitRenderActionFlags *ioAc
|
||||||
started = NO;
|
started = NO;
|
||||||
stopNext = NO;
|
stopNext = NO;
|
||||||
|
|
||||||
streamFormatSetup = NO;
|
|
||||||
|
|
||||||
downmixer = nil;
|
|
||||||
|
|
||||||
savedBuffer = NULL;
|
|
||||||
savedSize = 0;
|
|
||||||
savedMaxSize = 0;
|
|
||||||
|
|
||||||
atomic_init(&bytesBuffered, 0);
|
|
||||||
atomic_init(&bytesRendered, 0);
|
atomic_init(&bytesRendered, 0);
|
||||||
atomic_init(&bytesHdcdSustained, 0);
|
atomic_init(&bytesHdcdSustained, 0);
|
||||||
|
|
||||||
|
@ -312,7 +224,6 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
|
||||||
|
|
||||||
if ([outputController shouldReset]) {
|
if ([outputController shouldReset]) {
|
||||||
[[outputController buffer] empty];
|
[[outputController buffer] empty];
|
||||||
atomic_store(&bytesBuffered, 0);
|
|
||||||
[outputController setShouldReset:NO];
|
[outputController setShouldReset:NO];
|
||||||
[delayedEvents removeAllObjects];
|
[delayedEvents removeAllObjects];
|
||||||
delayedEventsPopped = YES;
|
delayedEventsPopped = YES;
|
||||||
|
@@ -333,73 +244,13 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 break;
 
 void *writePtr;
-BOOL wrapped = NO;
-int toWrite = [[outputController buffer] lengthAvailableToWriteReturningPointer:&writePtr bufferWrapped:&wrapped];
-int bytesWritten = 0;
-if (toWrite >= 4 + sizeof(AudioStreamBasicDescription)) {
-if ([outputController formatChanged]) {
-int32_t chunkId = 1; // ASBD
-memcpy(writePtr, &chunkId, 4);
-
-writePtr += 4;
-toWrite -= 4;
-bytesWritten += 4;
-
-AudioStreamBasicDescription inf = [outputController nodeFormat];
-
-outerStreamFormat = inf;
-
-memcpy(writePtr, &inf, sizeof(inf));
-
-writePtr += sizeof(inf);
-toWrite -= sizeof(inf);
-bytesWritten += sizeof(inf);
-}
-}
-[[outputController buffer] didWriteLength:bytesWritten];
-
-toWrite = [[outputController buffer] lengthAvailableToWriteReturningPointer:&writePtr bufferWrapped:&wrapped];
+int toWrite = [[outputController buffer] lengthAvailableToWriteReturningPointer:&writePtr];
 int bytesRead = 0;
-bytesWritten = 0;
-if (toWrite >= 4 + 4 + 512 * outerStreamFormat.mBytesPerPacket) {
-uint8_t buffer[512 * outerStreamFormat.mBytesPerPacket];
-bytesRead = [outputController readData:buffer amount:(int)sizeof(buffer)];
-
-while (bytesRead < sizeof(buffer) && ![outputController endOfStream]) {
-int bytesRead2 = [outputController readData:buffer + bytesRead amount:(int)(sizeof(buffer) - bytesRead)];
-bytesRead += bytesRead2;
-}
-
-int32_t chunkId = 0; // audio data
-memcpy(writePtr, &chunkId, 4);
-writePtr += 4;
-toWrite -= 4;
-bytesWritten += 4;
-
-chunkId = bytesRead;
-memcpy(writePtr, &chunkId, 4);
-writePtr += 4;
-toWrite -= 4;
-bytesWritten += 4;
-
-memcpy(writePtr, buffer, bytesRead);
-writePtr += bytesRead;
-toWrite -= bytesRead;
-bytesWritten += bytesRead;
-
-atomic_fetch_add(&bytesBuffered, bytesRead);
-
-[[outputController buffer] didWriteLength:bytesWritten];
-}
-else if (wrapped && toWrite > 0) {
-memset(writePtr, 0xFF, toWrite);
-[[outputController buffer] didWriteLength:toWrite];
-}
-else if (toWrite) {
-[[outputController buffer] didWriteLength:0];
-toWrite = 0;
-}
+if (toWrite > CHUNK_SIZE)
+toWrite = CHUNK_SIZE;
+if (toWrite)
+bytesRead = [outputController readData:writePtr amount:toWrite];
+[[outputController buffer] didWriteLength:bytesRead];
 if (bytesRead) {
 [readSemaphore signal];
 continue;
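For context on the hunk above: the writer being removed framed everything it put into the ring buffer as typed records (a 4-byte chunk id; id 1 carried an AudioStreamBasicDescription on format changes, id 0 carried a 4-byte byte count followed by that much PCM), while the restored code simply copies up to CHUNK_SIZE bytes of raw PCM per pass. A minimal C sketch of that framing, with illustrative names only, in case the layout is hard to read out of the diff:

#include <CoreAudio/CoreAudioTypes.h>
#include <stdint.h>
#include <string.h>

// Record id 1: a stream format descriptor follows.
static size_t write_format_chunk(uint8_t *dst, const AudioStreamBasicDescription *asbd)
{
    int32_t chunkId = 1;
    memcpy(dst, &chunkId, 4);
    memcpy(dst + 4, asbd, sizeof(*asbd));
    return 4 + sizeof(*asbd);
}

// Record id 0: a 4-byte payload length, then that many bytes of audio data.
static size_t write_audio_chunk(uint8_t *dst, const void *pcm, int32_t bytes)
{
    int32_t chunkId = 0;
    memcpy(dst, &chunkId, 4);
    memcpy(dst + 4, &bytes, 4);
    memcpy(dst + 8, pcm, (size_t)bytes);
    return 8 + (size_t)bytes;
}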
@@ -419,20 +270,20 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 // End of input possibly reached
 if (delayedEventsPopped && [outputController endOfStream] == YES)
 {
-long _bytesBuffered = atomic_load_explicit(&bytesBuffered, memory_order_relaxed) * deviceFormat.mBytesPerPacket / outerStreamFormat.mBytesPerPacket;
-_bytesBuffered += atomic_load_explicit(&bytesRendered, memory_order_relaxed);
+long bytesBuffered = [[outputController buffer] bufferedLength];
+bytesBuffered += atomic_load_explicit(&bytesRendered, memory_order_relaxed);
 if ([outputController chainQueueHasTracks])
 {
-if (_bytesBuffered < CHUNK_SIZE / 2)
-_bytesBuffered = 0;
+if (bytesBuffered < CHUNK_SIZE / 2)
+bytesBuffered = 0;
 else
-_bytesBuffered -= CHUNK_SIZE / 2;
+bytesBuffered -= CHUNK_SIZE / 2;
 }
 else {
 stopNext = YES;
 break;
 }
-[delayedEvents addObject:[NSNumber numberWithLong:_bytesBuffered]];
+[delayedEvents addObject:[NSNumber numberWithLong:bytesBuffered]];
 delayedEventsPopped = NO;
 if (!started) {
 started = YES;
@@ -626,9 +477,8 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 NSError *err;
 AVAudioFormat *renderFormat;
 
-[outputController incrementAmountPlayed:atomic_load_explicit(&bytesBuffered, memory_order_relaxed)];
+[outputController incrementAmountPlayed:[[outputController buffer] bufferedLength]];
 [[outputController buffer] empty];
-atomic_store(&bytesBuffered, 0);
 
 _deviceFormat = format;
 deviceFormat = *(format.streamDescription);
@@ -713,12 +563,6 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 stopNext = NO;
 outputDeviceID = -1;
 
-streamFormatSetup = NO;
-
-savedBuffer = NULL;
-savedSize = 0;
-savedMaxSize = 0;
-
 AudioComponentDescription desc;
 NSError *err;
 
@@ -825,8 +669,6 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 return 0;
 };
 
-[_au setMaximumFramesToRender:512];
-
 UInt32 value;
 UInt32 size = sizeof(value);
 
@@ -946,14 +788,6 @@ default_device_changed(AudioObjectID inObjectID, UInt32 inNumberAddresses, const
 _logFile = NULL;
 }
 #endif
-if (savedBuffer)
-{
-free(savedBuffer);
-savedBuffer = NULL;
-savedSize = 0;
-savedMaxSize = 0;
-}
-
 outputController = nil;
 }
 
@@ -83,7 +83,7 @@
 // Write operations:
 
 // The writing thread must call this method first.
-- (UInt32)lengthAvailableToWriteReturningPointer:(void **)returnedWritePointer bufferWrapped:(BOOL*)wrapped;
+- (UInt32)lengthAvailableToWriteReturningPointer:(void **)returnedWritePointer;
 // Iff a value > 0 is returned, the writing thread may then write that much data into the returned pointer.
 // Afterwards, the writing thread must call didWriteLength:.
 - (void)didWriteLength:(UInt32)length;
@@ -140,7 +140,7 @@ static void deallocateVirtualBuffer(void *buffer, UInt32 bufferLength);
 // Write operations
 //
 
-- (UInt32)lengthAvailableToWriteReturningPointer:(void **)returnedWritePointer bufferWrapped:(BOOL *)wrapped
+- (UInt32)lengthAvailableToWriteReturningPointer:(void **)returnedWritePointer
 {
 // Assumptions:
 // returnedWritePointer != NULL
@@ -152,10 +152,8 @@ static void deallocateVirtualBuffer(void *buffer, UInt32 bufferLength);
 int localBufferFilled = atomic_load_explicit(&bufferFilled, memory_order_relaxed);
 
 length = bufferLength - localBufferFilled;
-if (length >= bufferLength - localWritePointer) {
-*wrapped = YES;
+if (length > bufferLength - localWritePointer)
 length = bufferLength - localWritePointer;
-}
 
 *returnedWritePointer = buffer + localWritePointer;
 
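For context on the hunk above: the restored write-side query returns the largest contiguous region the writer may fill, i.e. the free space clamped to the run that ends where the ring wraps; the bufferWrapped: out-parameter removed by the revert existed only to tell the caller that such a clamp happened. A minimal C sketch of the restored computation (illustrative names, not the actual VirtualRingBuffer ivars):

#include <stdint.h>

// Return how many bytes can be written contiguously at the current write
// offset, and where that region starts.
static uint32_t writable_length(uint8_t *buffer, uint32_t bufferLength,
                                uint32_t bufferFilled, uint32_t writePointer,
                                void **returnedWritePointer)
{
    uint32_t length = bufferLength - bufferFilled;   // total free space
    if (length > bufferLength - writePointer)        // clamp to the run before
        length = bufferLength - writePointer;        // the wrap-around point
    *returnedWritePointer = buffer + writePointer;
    return length;
}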