Core Audio: Implement proper fade on seek
Whew, what a mess! And this may pave the way for crossfading.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
parent 9b973a4b53
commit 15eaa877b1

9 changed files with 229 additions and 99 deletions
@@ -75,14 +75,12 @@
	[self waitUntilCallbacksExit];
	if(output) {
		[output fadeOut];
		[output setShouldContinue:NO];
		[output close];
		[output fadeOutBackground];
	}
	if(!output) {
		output = [[OutputNode alloc] initWithController:self previous:nil];
		[output setupWithInterval:resumeInterval];
	}
	[output setupWithInterval:resumeInterval];
	[output setVolume:volume];
	@synchronized(chainQueue) {
		for(id anObject in chainQueue) {

@@ -125,14 +123,15 @@
	}

	if(time > 0.0) {
		[output fadeIn];
		[output seek:time];
		[bufferChain seek:time];
	}

	[self setShouldContinue:YES];

	outputLaunched = NO;
	if(!resumeInterval) {
		outputLaunched = NO;
	}
	startedPaused = paused;
	initialBufferFilled = NO;
	previousUserInfo = userInfo;

@@ -144,6 +143,8 @@
		if(time > 0.0) {
			[self updatePosition:userInfo];
		}
	} else if(resumeInterval) {
		[output fadeIn];
	}
}

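Taken together, the hunks above change the controller's seek path to fade rather than cut: the playing output fades out, its remaining buffered audio is handed off to finish fading in the background, and the stream fades back in at the new position. A rough consolidated sketch, not the actual AudioPlayer code (seekWithFade: is a hypothetical name, and the chain-queue bookkeeping is omitted):

	// Hypothetical consolidation of the seek path shown above.
	- (void)seekWithFade:(double)time {
		if(output) {
			[output fadeOut];              // begin ramping the currently playing audio down
			[output setShouldContinue:NO];
			[output fadeOutBackground];    // hand the remaining buffer off to fade out on its own
		}
		if(!output) {
			output = [[OutputNode alloc] initWithController:self previous:nil];
			[output setupWithInterval:resumeInterval];
		}
		[output setVolume:volume];
		if(time > 0.0) {
			[output fadeIn];               // the new position ramps up from silence
			[output seek:time];
			[bufferChain seek:time];
		}
		[self setShouldContinue:YES];
	}
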
@@ -17,6 +17,8 @@

- (void)threadEntry:(id _Nullable)arg;

- (void)setShouldContinue:(BOOL)s;

- (double)secondsBuffered;

@end

@@ -9,7 +9,9 @@

#import "DSPNode.h"

@implementation DSPNode
@implementation DSPNode {
	BOOL threadTerminated;
}

- (id _Nullable)initWithController:(id _Nonnull)c previous:(id _Nullable)p latency:(double)latency {
	self = [super init];

@@ -53,7 +55,17 @@
		NSThread *currentThread = [NSThread currentThread];
		[currentThread setThreadPriority:0.75];
		[currentThread setQualityOfService:NSQualityOfServiceUserInitiated];
		threadTerminated = NO;
		[self process];
		threadTerminated = YES;
	}
}

- (void)setShouldContinue:(BOOL)s {
	BOOL currentShouldContinue = shouldContinue;
	shouldContinue = s;
	if(!currentShouldContinue && s && threadTerminated) {
		[self launchThread];
	}
}

@@ -39,6 +39,8 @@
- (BOOL)selectNextBuffer;
- (void)endOfInputPlayed;

- (BOOL)endOfStream;

- (BOOL)chainQueueHasTracks;

- (double)secondsBuffered;

@@ -50,6 +52,7 @@
- (void)seek:(double)time;

- (void)fadeOut;
- (void)fadeOutBackground;
- (void)fadeIn;

- (AudioChunk *)readChunk:(size_t)amount;

@@ -103,7 +103,12 @@
	[output fadeOut];
}

- (void)fadeOutBackground {
	[output fadeOutBackground];
}

- (void)fadeIn {
	[self reconnectInputAndReplumb];
	[output fadeIn];
}

@@ -137,9 +142,7 @@
- (BOOL)selectNextBuffer {
	BOOL ret = [controller selectNextBuffer];
	if(!ret) {
		Node *finalNode = [[controller bufferChain] finalNode];
		[rubberbandNode setPreviousNode:finalNode];
		[self reconnectInput];
		[self reconnectInputAndReplumb];
	}
	return ret;
}

@@ -169,11 +172,28 @@
	}
}

- (void)reconnectInput {
- (BOOL)reconnectInput {
	Node *finalNode = nil;
	if(rubberbandNode) {
		finalNode = [[controller bufferChain] finalNode];
		[rubberbandNode setPreviousNode:finalNode];
	}

	return !!finalNode;
}

- (void)reconnectInputAndReplumb {
	Node *finalNode = nil;
	if(rubberbandNode) {
		finalNode = [[controller bufferChain] finalNode];
		[rubberbandNode setPreviousNode:finalNode];
	}

	NSArray *DSPs = [self DSPs];

	for (Node *node in DSPs) {
		[node setEndOfStream:NO];
		[node setShouldContinue:YES];
	}
}

@@ -187,10 +207,7 @@

- (AudioChunk *)readChunk:(size_t)amount {
	@autoreleasepool {
		Node *finalNode = [[controller bufferChain] finalNode];
		[rubberbandNode setPreviousNode:finalNode];

		if(finalNode) {
		if([self reconnectInput]) {
			AudioChunk *ret = [super readChunk:amount];

			if((!ret || ![ret frameCount]) && [previousNode endOfStream]) {

@@ -206,14 +223,15 @@

- (BOOL)peekFormat:(nonnull AudioStreamBasicDescription *)format channelConfig:(nonnull uint32_t *)config {
	@autoreleasepool {
		Node *finalNode = [[controller bufferChain] finalNode];
		[rubberbandNode setPreviousNode:finalNode];

		BOOL ret = [super peekFormat:format channelConfig:config];
		if(!ret && [previousNode endOfStream]) {
			endOfStream = YES;
		if([self reconnectInput]) {
			BOOL ret = [super peekFormat:format channelConfig:config];
			if(!ret && [previousNode endOfStream]) {
				endOfStream = YES;
			}
			return ret;
		} else {
			return NO;
		}
		return ret;
	}
}

@@ -24,8 +24,7 @@

- (void)resetBuffer;

- (void)pop;
- (void)replayPreroll;
- (void)setShouldContinue:(BOOL)s;

- (void)process;

@@ -30,6 +30,7 @@
	BOOL processEntered;
	BOOL stopping;
	BOOL paused;
	BOOL threadTerminated;

	AudioStreamBasicDescription inputFormat;
	AudioStreamBasicDescription visFormat; // Mono format for vis

@@ -95,7 +96,9 @@
		NSThread *currentThread = [NSThread currentThread];
		[currentThread setThreadPriority:0.75];
		[currentThread setQualityOfService:NSQualityOfServiceUserInitiated];
		threadTerminated = NO;
		[self process];
		threadTerminated = YES;
	}
}

@@ -113,6 +116,14 @@
	return [buffer listDuration];
}

- (void)setShouldContinue:(BOOL)s {
	BOOL currentShouldContinue = shouldContinue;
	shouldContinue = s;
	if(!currentShouldContinue && s && threadTerminated) {
		[self launchThread];
	}
}

- (BOOL)setup {
	if(fabs(inputFormat.mSampleRate - 44100.0) > 1e-6) {
		rs = rsstate_new(1, inputFormat.mSampleRate, 44100.0);

@@ -57,6 +57,7 @@ using std::atomic_long;
	BOOL commandStop;
	BOOL resetting;

	BOOL cutOffInput;
	BOOL fading, faded;
	float fadeLevel;
	float fadeStep;

@@ -116,6 +117,7 @@ using std::atomic_long;
- (void)stop;

- (void)fadeOut;
- (void)fadeOutBackground;
- (void)fadeIn;

- (double)latency;

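For reference, a plausible reading of the new output-state flags, inferred from how the rest of this diff uses them (the comments are editorial, not from the source):

	BOOL cutOffInput;  // when set, the render loop stops refilling outputBuffer (see fadeOutBackground)
	BOOL fading;       // a fade ramp is currently being applied in the render callback
	BOOL faded;        // a fade-out reached silence; the callback outputs zeros until the state is reset
	float fadeLevel;   // current gain applied to freshly rendered samples
	float fadeStep;    // per-sample change applied to fadeLevel
	float fadeTarget;  // level the ramp is heading toward (0.0 for fade-out, 1.0 for fade-in)
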
@@ -23,15 +23,89 @@ extern void scale_by_volume(float *buffer, size_t count, float volume);

static NSString *CogPlaybackDidBeginNotificiation = @"CogPlaybackDidBeginNotificiation";

static BOOL fadeAudio(const float *inSamples, float *outSamples, size_t channels, size_t count, float *fadeLevel, float fadeStep, float fadeTarget) {
	float _fadeLevel = *fadeLevel;
	BOOL towardZero = fadeStep < 0.0;
	BOOL stopping = NO;
	for(size_t i = 0; i < count; ++i) {
		for(size_t j = 0; j < channels; ++j) {
			outSamples[j] += inSamples[j] * _fadeLevel;
		}
		inSamples += channels;
		outSamples += channels;
		_fadeLevel += fadeStep;
		if(towardZero && _fadeLevel <= fadeTarget) {
			_fadeLevel = fadeTarget;
			fadeStep = 0.0;
			stopping = YES;
			break;
		} else if(!towardZero && _fadeLevel >= fadeTarget) {
			_fadeLevel = fadeTarget;
			fadeStep = 0.0;
			stopping = YES;
		}
	}
	*fadeLevel = _fadeLevel;
	return stopping;
}

@interface FadedBuffer : NSObject {
	float fadeLevel;
	float fadeStep;
	float fadeTarget;

	ChunkList *lastBuffer;
}

- (id)initWithBuffer:(ChunkList *)buffer fadeTarget:(float)fadeTarget sampleRate:(double)sampleRate;
- (BOOL)mix:(float *)outputBuffer sampleCount:(size_t)samples channelCount:(size_t)channels;

@end

@implementation FadedBuffer

- (id)initWithBuffer:(ChunkList *)buffer fadeTarget:(float)fadeTarget sampleRate:(double)sampleRate {
	self = [super init];
	if(self) {
		fadeLevel = 1.0;
		self->fadeTarget = fadeTarget;
		lastBuffer = buffer;
		const double maxFadeDurationMS = 1000.0 * [buffer listDuration];
		const double fadeDuration = MIN(125.0f, maxFadeDurationMS);
		fadeStep = ((fadeTarget - fadeLevel) / sampleRate) * (1000.0f / fadeDuration);
	}
	return self;
}

- (BOOL)mix:(float *)outputBuffer sampleCount:(size_t)samples channelCount:(size_t)channels {
	if(lastBuffer) {
		AudioChunk *chunk = [lastBuffer removeAndMergeSamples:samples callBlock:^BOOL{
			// Always interrupt if buffer runs empty, because it is not being refilled any more
			return true;
		}];
		if(chunk && [chunk frameCount]) {
			// Will always be input request size or less
			size_t samplesToMix = [chunk frameCount];
			NSData *sampleData = [chunk removeSamples:samplesToMix];
			return fadeAudio((const float *)[sampleData bytes], outputBuffer, channels, samplesToMix, &fadeLevel, fadeStep, fadeTarget);
		}
	}
	// No buffer or no chunk, stream ended
	return true;
}

@end

@implementation OutputCoreAudio {
	VisualizationController *visController;

	NSLock *fadedBuffersLock;
	NSMutableArray<FadedBuffer *> *fadedBuffers;
}

static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;

- (AudioChunk *)renderInput:(int)amountToRead {
	int amountRead = 0;

	if(stopping == YES || [outputController shouldContinue] == NO) {
		// Chain is dead, fill out the serial number pointer forever with silence
		stopping = YES;

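Unlike the older fadeAudio this commit removes further down (which scaled samples in place), this version multiplies the incoming samples by the running fade level and adds them into the destination, so a fading tail can be layered on top of freshly rendered audio. A minimal illustration of that accumulation, with invented sample values; fadeAudio is file-static, so a call like this is only possible inside the same translation unit:

	// Hypothetical demonstration values: 4 stereo frames fading from 1.0 toward 0.0.
	float tail[8] = { 1, 1, 1, 1, 1, 1, 1, 1 }; // samples snapshotted by a FadedBuffer
	float out[8] = { 0 };                        // freshly rendered audio would already be here
	float level = 1.0f;
	BOOL finished = fadeAudio(tail, out, 2, 4, &level, -0.25f, 0.0f);
	// out is now { 1.0, 1.0, 0.75, 0.75, 0.5, 0.5, 0.25, 0.25 } and finished == YES,
	// which is the cue for the render callback to drop this FadedBuffer from its array.
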
@@ -67,6 +141,9 @@ static void *kOutputCoreAudioContext = &kOutputCoreAudioContext;

	outputLock = [[NSLock alloc] init];

	fadedBuffersLock = [[NSLock alloc] init];
	fadedBuffers = [[NSMutableArray alloc] init];

#ifdef OUTPUT_LOG
	NSString *logName = [NSTemporaryDirectory() stringByAppendingPathComponent:@"CogAudioLog.raw"];
	_logFile = fopen([logName UTF8String], "wb");

@@ -162,22 +239,13 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
			if(stopping)
				break;

			if(![outputBuffer isFull]) {
			if(!cutOffInput && ![outputBuffer isFull]) {
				[self renderAndConvert];
				rendered = YES;
			} else {
				rendered = NO;
			}

#if 0
			if(faded && !paused) {
				resetting = YES;
				[self pause];
				started = NO;
				resetting = NO;
			}
#endif

			if(!started && !paused) {
				// Prevent this call from hanging when used in this thread, when buffer may be empty
				// and waiting for this very thread to fill it

@@ -543,30 +611,6 @@ current_device_listener(AudioObjectID inObjectID, UInt32 inNumberAddresses, cons
	}
}

static BOOL fadeAudio(float * samples, size_t channels, size_t count, float * fadeLevel, float fadeStep, float fadeTarget) {
	float _fadeLevel = *fadeLevel;
	BOOL towardZero = fadeStep < 0.0;
	BOOL stopping = NO;
	for(size_t i = 0; i < count; ++i) {
		for(size_t j = 0; j < channels; ++j) {
			samples[j] *= _fadeLevel;
		}
		samples += channels;
		_fadeLevel += fadeStep;
		if(towardZero && _fadeLevel <= fadeTarget) {
			_fadeLevel = fadeTarget;
			fadeStep = 0.0;
			stopping = YES;
		} else if(!towardZero && _fadeLevel >= fadeTarget) {
			_fadeLevel = fadeTarget;
			fadeStep = 0.0;
			stopping = YES;
		}
	}
	*fadeLevel = _fadeLevel;
	return stopping;
}

- (void)renderAndConvert {
	if(resetStreamFormat) {
		[self updateStreamFormat];

@@ -600,6 +644,8 @@ static BOOL fadeAudio(float * samples, size_t channels, size_t count, float * fa
	__block AudioStreamBasicDescription *format = &deviceFormat;
	__block void *refCon = (__bridge void *)self;
	__block NSLock *refLock = self->outputLock;
	__block NSLock *fadersLock = self->fadedBuffersLock;
	__block NSMutableArray *faders = self->fadedBuffers;

#ifdef OUTPUT_LOG
	__block FILE *logFile = _logFile;

@@ -616,35 +662,55 @@ static BOOL fadeAudio(float * samples, size_t channels, size_t count, float * fa
		OutputCoreAudio *_self = (__bridge OutputCoreAudio *)refCon;
		int renderedSamples = 0;

		if(_self->resetting || _self->faded) {
			inputData->mBuffers[0].mDataByteSize = frameCount * format->mBytesPerPacket;
			bzero(inputData->mBuffers[0].mData, inputData->mBuffers[0].mDataByteSize);
			inputData->mBuffers[0].mNumberChannels = channels;
		inputData->mBuffers[0].mDataByteSize = frameCount * format->mBytesPerPacket;
		bzero(inputData->mBuffers[0].mData, inputData->mBuffers[0].mDataByteSize);
		inputData->mBuffers[0].mNumberChannels = channels;

		if(_self->resetting) {
			return 0;
		}

		float *outSamples = (float*)inputData->mBuffers[0].mData;

		@autoreleasepool {
			while(renderedSamples < frameCount) {
				[refLock lock];
				AudioChunk *chunk = nil;
				if(![_self->outputBuffer isEmpty]) {
					chunk = [_self->outputBuffer removeSamples:frameCount - renderedSamples];
				}
				[refLock unlock];
			if(!_self->faded) {
				while(renderedSamples < frameCount) {
					[refLock lock];
					AudioChunk *chunk = nil;
					if(_self->outputBuffer && ![_self->outputBuffer isEmpty]) {
						chunk = [_self->outputBuffer removeSamples:frameCount - renderedSamples];
					}
					[refLock unlock];

					if(chunk && [chunk frameCount]) {
						_self->streamTimestamp = [chunk streamTimestamp];
					size_t _frameCount = 0;

						size_t _frameCount = [chunk frameCount];
						NSData *sampleData = [chunk removeSamples:_frameCount];
						float *samplePtr = (float *)[sampleData bytes];
						size_t inputTodo = MIN(_frameCount, frameCount - renderedSamples);
						cblas_scopy((int)(inputTodo * channels), samplePtr, 1, ((float *)inputData->mBuffers[0].mData) + renderedSamples * channels, 1);
						renderedSamples += inputTodo;
					}
					if(chunk && [chunk frameCount]) {
						_self->streamTimestamp = [chunk streamTimestamp];

					if(_self->stopping || _self->resetting || _self->faded) {
						break;
						_frameCount = [chunk frameCount];
						NSData *sampleData = [chunk removeSamples:_frameCount];
						float *samplePtr = (float *)[sampleData bytes];
						size_t inputTodo = MIN(_frameCount, frameCount - renderedSamples);

						if(!_self->fading) {
							cblas_scopy((int)(inputTodo * channels), samplePtr, 1, outSamples + renderedSamples * channels, 1);
						} else {
							BOOL faded = fadeAudio(samplePtr, outSamples + renderedSamples * channels, channels, inputTodo, &_self->fadeLevel, _self->fadeStep, _self->fadeTarget);
							if(faded) {
								if(_self->fadeStep < 0.0) {
									_self->faded = YES;
								}
								_self->fading = NO;
								_self->fadeStep = 0.0f;
							}
						}

						renderedSamples += inputTodo;
					}

					if(_self->stopping || _self->resetting || _self->faded || !chunk || !_frameCount) {
						break;
					}
				}
			}

@@ -661,21 +727,19 @@ static BOOL fadeAudio(float * samples, size_t channels, size_t count, float * fa
			}
		}

		scale_by_volume((float*)inputData->mBuffers[0].mData, renderedSamples * channels, volumeScale * _self->volume);

		if(_self->fading) {
			BOOL faded = fadeAudio((float*)inputData->mBuffers[0].mData, channels, renderedSamples, &_self->fadeLevel, _self->fadeStep, _self->fadeTarget);
			if(faded) {
				if(_self->fadeStep < 0.0f) {
					_self->faded = YES;
				}
				_self->fading = NO;
				_self->fadeStep = 0.0f;
		[fadersLock lock];
		for(size_t i = 0; i < [faders count];) {
			FadedBuffer *buffer = faders[i];
			BOOL stopping = [buffer mix:outSamples sampleCount:frameCount channelCount:channels];
			if(stopping) {
				[faders removeObjectAtIndex:i];
			} else {
				++i;
			}
		}
		[fadersLock unlock];

		inputData->mBuffers[0].mDataByteSize = renderedSamples * format->mBytesPerPacket;
		inputData->mBuffers[0].mNumberChannels = channels;
		scale_by_volume(outSamples, frameCount * channels, volumeScale * _self->volume);

		[_self updateLatency:secondsRendered];
	}

@@ -711,6 +775,7 @@ static BOOL fadeAudio(float * samples, size_t channels, size_t count, float * fa
	outputDeviceID = -1;
	restarted = NO;

	cutOffInput = NO;
	fadeTarget = 1.0f;
	fadeLevel = 1.0f;
	fadeStep = 0.0f;

@@ -840,9 +905,13 @@ static BOOL fadeAudio(float * samples, size_t channels, size_t count, float * fa
		} while(!commandStop && compareVal > 0 && compareMax-- > 0);
	} else {
		[self fadeOut];
		while(fading && !faded) {
			usleep(5000);
		[fadedBuffersLock lock];
		while([fadedBuffers count]) {
			[fadedBuffersLock unlock];
			usleep(10000);
			[fadedBuffersLock lock];
		}
		[fadedBuffersLock unlock];
	}
	[_au stopHardware];
	_au = nil;

@@ -912,11 +981,24 @@ static BOOL fadeAudio(float * samples, size_t channels, size_t count, float * fa
	fading = YES;
}

- (void)fadeOutBackground {
	cutOffInput = YES;
	[outputLock lock];
	[fadedBuffersLock lock];
	FadedBuffer *buffer = [[FadedBuffer alloc] initWithBuffer:outputBuffer fadeTarget:0.0 sampleRate:deviceFormat.mSampleRate];
	outputBuffer = [[ChunkList alloc] initWithMaximumDuration:0.5];
	[fadedBuffers addObject:buffer];
	[fadedBuffersLock unlock];
	[outputLock unlock];
}

- (void)fadeIn {
	fadeTarget = 1.0;
	fadeLevel = 0.0f;
	fadeTarget = 1.0f;
	fadeStep = ((fadeTarget - fadeLevel) / deviceFormat.mSampleRate) * (1000.0f / 125.0f);
	fading = YES;
	faded = NO;
	cutOffInput = NO;
}

@end
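
Both fadeIn and the FadedBuffer initializer derive the per-sample fade step the same way: the total level change divided by the sample rate, scaled by the fade length in milliseconds. Worked through under the assumption of a 44.1 kHz device and the 125 ms fade used here:

	fadeStep = ((fadeTarget - fadeLevel) / sampleRate) * (1000.0 / fadeDuration)
	         = ((1.0 - 0.0) / 44100.0) * (1000.0 / 125.0)
	         = (1.0 / 44100.0) * 8.0
	         ≈ 0.0001814 per sample

	44100 samples/s * 0.125 s = 5512.5 samples, and 1.0 / 0.0001814 ≈ 5512.5,
	so fadeLevel ramps from 0.0 to 1.0 over roughly 125 ms of output.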