Audio Processing: Unify sample block merging code

Sample block merging code should not be duplicated across the DSPs that require it, but should instead be a common function. Also optimized the Float32 converter function to bypass conversion entirely when the audio format needs none.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>

parent 2364a7d469
commit a82742e689

8 changed files with 194 additions and 134 deletions
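For orientation before the diff: every DSP node used to run its own peek-format/read-chunk accumulation loop, and this commit replaces those loops with a shared helper. A minimal sketch of the resulting consumption pattern, assuming only the method names introduced in the diff below (readAndMergeChunksAsFloat32:, frameCount, removeSamples:); the surrounding method body is hypothetical:

	// Pull one merged chunk of up to 4096 frames. Merging stops at any
	// format or channel-config change, so the chunk is always homogeneous
	// interleaved Float32 audio.
	AudioChunk *chunk = [self readAndMergeChunksAsFloat32:4096];
	if(![chunk duration]) {
		return nil; // nothing buffered upstream yet
	}

	size_t frameCount = [chunk frameCount];
	NSData *sampleData = [chunk removeSamples:frameCount];
	// ... feed [sampleData bytes] to the DSP ...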
@@ -197,7 +197,7 @@ static const uint32_t AudioChannelConfigTable[] = {
 }

 - (double)duration {
-	if(formatAssigned) {
+	if(formatAssigned && [chunkData length]) {
 		const size_t bytesPerPacket = format.mBytesPerPacket;
 		const double sampleRate = format.mSampleRate;
 		return (double)([chunkData length] / bytesPerPacket) / sampleRate;
@@ -73,6 +73,10 @@ NS_ASSUME_NONNULL_BEGIN

 - (BOOL)peekTimestamp:(nonnull double *)timestamp timeRatio:(nonnull double *)timeRatio;

+// Helpers
+- (AudioChunk *)removeAndMergeSamples:(size_t)maxFrameCount;
+- (AudioChunk *)removeAndMergeSamplesAsFloat32:(size_t)maxFrameCount;
+
 @end

 NS_ASSUME_NONNULL_END
@@ -550,8 +550,79 @@ static void convert_be_to_le(uint8_t *buffer, size_t bitsPerSample, size_t bytes
 	}
 }

+- (AudioChunk *)removeAndMergeSamples:(size_t)maxFrameCount {
+	BOOL formatSet = NO;
+	AudioStreamBasicDescription currentFormat;
+	uint32_t currentChannelConfig = 0;
+
+	double streamTimestamp = 0.0;
+	double streamTimeRatio = 1.0;
+	if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
+		return [[AudioChunk alloc] init];
+	}
+
+	AudioChunk *chunk;
+	size_t totalFrameCount = 0;
+	AudioChunk *outputChunk = [[AudioChunk alloc] init];
+
+	[outputChunk setStreamTimestamp:streamTimestamp];
+	[outputChunk setStreamTimeRatio:streamTimeRatio];
+
+	while(totalFrameCount < maxFrameCount) {
+		AudioStreamBasicDescription newFormat;
+		uint32_t newChannelConfig;
+		if(![self peekFormat:&newFormat channelConfig:&newChannelConfig]) {
+			break;
+		}
+		if(formatSet &&
+		   (memcmp(&newFormat, &currentFormat, sizeof(newFormat)) != 0 ||
+		    newChannelConfig != currentChannelConfig)) {
+			break;
+		} else if(!formatSet) {
+			[outputChunk setFormat:newFormat];
+			[outputChunk setChannelConfig:newChannelConfig];
+			currentFormat = newFormat;
+			currentChannelConfig = newChannelConfig;
+			formatSet = YES;
+		}
+
+		chunk = [self removeSamples:maxFrameCount - totalFrameCount];
+		if(![chunk duration]) {
+			break;
+		}
+
+		if([chunk isHDCD]) {
+			[outputChunk setHDCD];
+		}
+
+		size_t frameCount = [chunk frameCount];
+		NSData *sampleData = [chunk removeSamples:frameCount];
+
+		[outputChunk assignData:sampleData];
+
+		totalFrameCount += frameCount;
+	}
+
+	if(!totalFrameCount) {
+		return [[AudioChunk alloc] init];
+	}
+
+	return outputChunk;
+}
+
+- (AudioChunk *)removeAndMergeSamplesAsFloat32:(size_t)maxFrameCount {
+	AudioChunk *ret = [self removeAndMergeSamples:maxFrameCount];
+	return [self convertChunk:ret];
+}
+
 - (AudioChunk *)convertChunk:(AudioChunk *)inChunk {
 	AudioStreamBasicDescription chunkFormat = [inChunk format];
+	if(![inChunk duration] ||
+	   (chunkFormat.mFormatFlags == kAudioFormatFlagsNativeFloatPacked &&
+	    chunkFormat.mBitsPerChannel == 32)) {
+		return inChunk;
+	}
+
 	uint32_t chunkConfig = [inChunk channelConfig];
 	BOOL chunkLossless = [inChunk lossless];
 	if(!formatRead || memcmp(&chunkFormat, &inputFormat, sizeof(chunkFormat)) != 0 ||
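The early return added to convertChunk: above is the bypass mentioned in the commit message: a chunk that is already 32-bit native-endian packed float is handed back untouched. An illustrative standalone check mirroring that condition (chunk here is a hypothetical AudioChunk, not a line from the diff):

	AudioStreamBasicDescription fmt = [chunk format];
	BOOL alreadyFloat32 = (fmt.mFormatFlags == kAudioFormatFlagsNativeFloatPacked &&
	                       fmt.mBitsPerChannel == 32);
	// When alreadyFloat32 is YES, removeAndMergeSamplesAsFloat32:
	// performs no conversion pass at all.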
@@ -354,13 +354,6 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 		return nil;
 	}

-	double streamTimestamp;
-	double streamTimeRatio;
-	if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
-		processEntered = NO;
-		return nil;
-	}
-
 	if((enableEqualizer && !equalizerInitialized) ||
 	   memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 ||
 	   inputChannelConfig != lastInputChannelConfig) {
@@ -379,43 +372,21 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 	}

 	size_t totalFrameCount = 0;
-	AudioChunk *chunk;
+	AudioChunk *chunk = [self readAndMergeChunksAsFloat32:4096];
+	if(![chunk duration]) {
+		processEntered = NO;
+		return nil;
+	}
+
+	double streamTimestamp = [chunk streamTimestamp];
+
 	samplePtr = &inBuffer[0];
 	size_t channels = inputFormat.mChannelsPerFrame;

-	BOOL isHDCD = NO;
-
-	while(!stopping && totalFrameCount < 4096) {
-		AudioStreamBasicDescription newInputFormat;
-		uint32_t newChannelConfig;
-		if(![self peekFormat:&newInputFormat channelConfig:&newChannelConfig] ||
-		   memcmp(&newInputFormat, &inputFormat, sizeof(newInputFormat)) != 0 ||
-		   newChannelConfig != inputChannelConfig) {
-			break;
-		}
-
-		chunk = [self readChunkAsFloat32:4096 - totalFrameCount];
-		if(!chunk) {
-			break;
-		}
-
-		if([chunk isHDCD]) {
-			isHDCD = YES;
-		}
-
 	size_t frameCount = [chunk frameCount];
 	NSData *sampleData = [chunk removeSamples:frameCount];

-	cblas_scopy((int)(frameCount * channels), [sampleData bytes], 1, &inBuffer[totalFrameCount * channels], 1);
+	cblas_scopy((int)(frameCount * channels), [sampleData bytes], 1, &inBuffer[0], 1);

-	totalFrameCount += frameCount;
-	}
-
-	if(!totalFrameCount) {
-		processEntered = NO;
-		return nil;
-	}

 	const size_t channelsminusone = channels - 1;
 	uint8_t tempBuffer[sizeof(AudioBufferList) + sizeof(AudioBuffer) * channelsminusone];
@@ -428,21 +399,21 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 		ioData->mBuffers[i].mNumberChannels = 1;
 	}

-	OSStatus status = AudioUnitRender(_eq, NULL, &timeStamp, 0, (UInt32)totalFrameCount, ioData);
+	OSStatus status = AudioUnitRender(_eq, NULL, &timeStamp, 0, (UInt32)frameCount, ioData);

 	if(status != noErr) {
 		processEntered = NO;
 		return nil;
 	}

-	timeStamp.mSampleTime += ((double)totalFrameCount) / inputFormat.mSampleRate;
+	timeStamp.mSampleTime += ((double)frameCount) / inputFormat.mSampleRate;

 	for(int i = 0; i < channels; ++i) {
-		cblas_scopy((int)totalFrameCount, &eqBuffer[4096 * i], 1, &outBuffer[i], (int)channels);
+		cblas_scopy((int)frameCount, &eqBuffer[4096 * i], 1, &outBuffer[i], (int)channels);
 	}

 	AudioChunk *outputChunk = nil;
-	if(totalFrameCount) {
+	if(frameCount) {
 		scale_by_volume(&outBuffer[0], totalFrameCount * channels, equalizerPreamp);

 		outputChunk = [[AudioChunk alloc] init];
@@ -450,9 +421,9 @@ static OSStatus eqRenderCallback(void *inRefCon, AudioUnitRenderActionFlags *ioA
 		if(outputChannelConfig) {
 			[outputChunk setChannelConfig:inputChannelConfig];
 		}
-		if(isHDCD) [outputChunk setHDCD];
+		if([chunk isHDCD]) [outputChunk setHDCD];
 		[outputChunk setStreamTimestamp:streamTimestamp];
-		[outputChunk setStreamTimeRatio:streamTimeRatio];
+		[outputChunk setStreamTimeRatio:[chunk streamTimeRatio]];
 		[outputChunk assignSamples:&outBuffer[0] frameCount:totalFrameCount];
 	}
@@ -173,13 +173,6 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 		return nil;
 	}

-	double streamTimestamp;
-	double streamTimeRatio;
-	if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
-		processEntered = NO;
-		return nil;
-	}
-
 	if((enableFSurround && !fsurround) ||
 	   memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 ||
 	   inputChannelConfig != lastInputChannelConfig) {
@@ -200,42 +193,22 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 	size_t totalRequestedSamples = resetStreamFormat ? 2048 : 4096;

 	size_t totalFrameCount = 0;
-	AudioChunk *chunk;
+	AudioChunk *chunk = [self readAndMergeChunksAsFloat32:totalRequestedSamples];
+	if(![chunk duration]) {
+		processEntered = NO;
+		return nil;
+	}
+
+	double streamTimestamp = [chunk streamTimestamp];
+
 	float *samplePtr = resetStreamFormat ? &inBuffer[2048 * 2] : &inBuffer[0];

-	BOOL isHDCD = NO;
-
-	while(!stopping && totalFrameCount < totalRequestedSamples) {
-		AudioStreamBasicDescription newInputFormat;
-		uint32_t newChannelConfig;
-		if(![self peekFormat:&newInputFormat channelConfig:&newChannelConfig] ||
-		   memcmp(&newInputFormat, &inputFormat, sizeof(newInputFormat)) != 0 ||
-		   newChannelConfig != inputChannelConfig) {
-			break;
-		}
-
-		chunk = [self readChunkAsFloat32:totalRequestedSamples - totalFrameCount];
-		if(!chunk) {
-			break;
-		}
-
-		if([chunk isHDCD]) {
-			isHDCD = YES;
-		}
-
 	size_t frameCount = [chunk frameCount];
 	NSData *sampleData = [chunk removeSamples:frameCount];

-	cblas_scopy((int)frameCount * 2, [sampleData bytes], 1, &samplePtr[totalFrameCount * 2], 1);
+	cblas_scopy((int)frameCount * 2, [sampleData bytes], 1, &samplePtr[0], 1);

-	totalFrameCount += frameCount;
-	}
-
-	if(!totalFrameCount) {
-		processEntered = NO;
-		return nil;
-	}
+	totalFrameCount = frameCount;

 	if(resetStreamFormat) {
 		bzero(&inBuffer[0], 2048 * 2 * sizeof(float));
@@ -275,9 +248,9 @@ static void * kDSPFSurroundNodeContext = &kDSPFSurroundNodeContext;
 		if(outputChannelConfig) {
 			[outputChunk setChannelConfig:outputChannelConfig];
 		}
-		if(isHDCD) [outputChunk setHDCD];
+		if([chunk isHDCD]) [outputChunk setHDCD];
 		[outputChunk setStreamTimestamp:streamTimestamp];
-		[outputChunk setStreamTimeRatio:streamTimeRatio];
+		[outputChunk setStreamTimeRatio:[chunk streamTimeRatio]];
 		[outputChunk assignSamples:samplePtr frameCount:samplesRendered];
 	}
@@ -373,13 +373,6 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
 		return nil;
 	}

-	double streamTimestamp;
-	double streamTimeRatio;
-	if(![self peekTimestamp:&streamTimestamp timeRatio:&streamTimeRatio]) {
-		processEntered = NO;
-		return nil;
-	}
-
 	if(!ts || memcmp(&inputFormat, &lastInputFormat, sizeof(inputFormat)) != 0 ||
 	   inputChannelConfig != lastInputChannelConfig) {
 		lastInputFormat = inputFormat;
@@ -395,51 +388,26 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
 	if(samplesToProcess > blockSize)
 		samplesToProcess = blockSize;

-	size_t totalFrameCount = 0;
-	AudioChunk *chunk;
+	AudioChunk *chunk = [self readAndMergeChunksAsFloat32:samplesToProcess];
+	if(![chunk duration]) {
+		processEntered = NO;
+		return nil;
+	}
+
+	double streamTimestamp = [chunk streamTimestamp];
+
 	int channels = (int)(inputFormat.mChannelsPerFrame);

-	BOOL isHDCD = NO;
-
-	while(!stopping && totalFrameCount < samplesToProcess) {
-		AudioStreamBasicDescription newInputFormat;
-		uint32_t newChannelConfig;
-		if(![self peekFormat:&newInputFormat channelConfig:&newChannelConfig] ||
-		   memcmp(&newInputFormat, &inputFormat, sizeof(newInputFormat)) != 0 ||
-		   newChannelConfig != inputChannelConfig) {
-			break;
-		}
-
-		chunk = [self readChunkAsFloat32:samplesToProcess - totalFrameCount];
-		if(!chunk) {
-			break;
-		}
-
-		if([chunk isHDCD]) {
-			isHDCD = YES;
-		}
-
 	size_t frameCount = [chunk frameCount];
 	NSData *sampleData = [chunk removeSamples:frameCount];

 	for (size_t i = 0; i < channels; ++i) {
-		cblas_scopy((int)frameCount, ((const float *)[sampleData bytes]) + i, channels, rsPtrs[i] + totalFrameCount, 1);
-	}
-
-	totalFrameCount += frameCount;
-	}
-
-	if(!totalFrameCount) {
-		processEntered = NO;
-		return nil;
+		cblas_scopy((int)frameCount, ((const float *)[sampleData bytes]) + i, channels, rsPtrs[i], 1);
 	}

 	stretchIn += [chunk duration] / tempo;

 	bool endOfStream = [[previousNode buffer] isEmpty] && [previousNode endOfStream] == YES;

-	size_t frameCount = totalFrameCount;
-
 	int len = (int)frameCount;

 	rubberband_process(ts, (const float * const *)rsPtrs, len, endOfStream);
@@ -492,9 +460,9 @@ static void * kDSPRubberbandNodeContext = &kDSPRubberbandNodeContext;
 		if(inputChannelConfig) {
 			[outputChunk setChannelConfig:inputChannelConfig];
 		}
-		if(isHDCD) [outputChunk setHDCD];
+		if([chunk isHDCD]) [outputChunk setHDCD];
 		[outputChunk setStreamTimestamp:streamTimestamp];
-		[outputChunk setStreamTimeRatio:streamTimeRatio * tempo];
+		[outputChunk setStreamTimeRatio:[chunk streamTimeRatio] * tempo];
 		[outputChunk assignSamples:rsOutBuffer frameCount:samplesBuffered];
 		samplesBuffered = 0;
 		stretchOut += [outputChunk duration];
@@ -43,6 +43,9 @@
 - (AudioChunk *_Nonnull)readChunk:(size_t)maxFrames;
 - (AudioChunk *_Nonnull)readChunkAsFloat32:(size_t)maxFrames;

+- (AudioChunk *_Nonnull)readAndMergeChunks:(size_t)maxFrames;
+- (AudioChunk *_Nonnull)readAndMergeChunksAsFloat32:(size_t)maxFrames;
+
 - (BOOL)peekFormat:(AudioStreamBasicDescription *_Nonnull)format channelConfig:(uint32_t *_Nonnull)config;
 - (BOOL)peekTimestamp:(double *_Nonnull)timestamp timeRatio:(double *_Nonnull)timeRatio;
@@ -239,6 +239,76 @@
 	return ret;
 }

+- (AudioChunk *)readAndMergeChunks:(size_t)maxFrames {
+	[accessLock lock];
+
+	if([[previousNode buffer] isEmpty] && [previousNode endOfStream] == YES) {
+		endOfStream = YES;
+		[accessLock unlock];
+		return [[AudioChunk alloc] init];
+	}
+
+	if([previousNode shouldReset] == YES) {
+		@autoreleasepool {
+			[buffer reset];
+		}
+
+		shouldReset = YES;
+		[previousNode setShouldReset:NO];
+
+		[[previousNode semaphore] signal];
+	}
+
+	AudioChunk *ret;
+
+	@autoreleasepool {
+		ret = [[previousNode buffer] removeAndMergeSamples:maxFrames];
+	}
+
+	[accessLock unlock];
+
+	if([ret frameCount]) {
+		[[previousNode semaphore] signal];
+	}
+
+	return ret;
+}
+
+- (AudioChunk *)readAndMergeChunksAsFloat32:(size_t)maxFrames {
+	[accessLock lock];
+
+	if([[previousNode buffer] isEmpty] && [previousNode endOfStream] == YES) {
+		endOfStream = YES;
+		[accessLock unlock];
+		return [[AudioChunk alloc] init];
+	}
+
+	if([previousNode shouldReset] == YES) {
+		@autoreleasepool {
+			[buffer reset];
+		}
+
+		shouldReset = YES;
+		[previousNode setShouldReset:NO];
+
+		[[previousNode semaphore] signal];
+	}
+
+	AudioChunk *ret;
+
+	@autoreleasepool {
+		ret = [[previousNode buffer] removeAndMergeSamplesAsFloat32:maxFrames];
+	}
+
+	[accessLock unlock];
+
+	if([ret frameCount]) {
+		[[previousNode semaphore] signal];
+	}
+
+	return ret;
+}
+
 - (void)launchThread {
 	[NSThread detachNewThreadSelector:@selector(threadEntry:) toTarget:self withObject:nil];
 }
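One property of the merge boundary worth spelling out: removeAndMergeSamples: never splices chunks of differing formats or channel configs into one output; a format change simply ends the merge, leaving the remaining chunks queued. A hedged sketch, assuming a hypothetical chunkList that holds a 44.1 kHz chunk followed by a 48 kHz chunk:

	// First call returns only the 44.1 kHz frames ...
	AudioChunk *first = [chunkList removeAndMergeSamples:4096];
	// ... and the next call starts cleanly at the 48 kHz boundary.
	AudioChunk *second = [chunkList removeAndMergeSamples:4096];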