This is the code that actually needs to be added to make it process audio. This insertion makes the whole app crash whenever it processes audio. Weirdly, simply reverting these two files makes the audio code work again. I can't explain it. Also, commenting out CMAudioFormatDescriptionCreate makes it work, too. There's something weird going on with that function.

Signed-off-by: Christopher Snowhill <kode54@gmail.com>
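For context, CMAudioFormatDescriptionCreate is the Core Media call that wraps an AudioStreamBasicDescription in the CMAudioFormatDescriptionRef used when building sample buffers for AVSampleBufferAudioRenderer. The sketch below only illustrates that kind of call; it is not the actual code in OutputAVFoundation.m, and the helper name plus the choice to pass no channel layout, magic cookie, or extensions are assumptions.

#import <CoreMedia/CoreMedia.h>

// Hypothetical helper, for illustration only.
static CMAudioFormatDescriptionRef CreateFormatDescription(const AudioStreamBasicDescription *asbd) {
	CMAudioFormatDescriptionRef desc = NULL;
	OSStatus err = CMAudioFormatDescriptionCreate(kCFAllocatorDefault, asbd,
	                                              0, NULL, // no AudioChannelLayout
	                                              0, NULL, // no magic cookie
	                                              NULL,    // no extensions
	                                              &desc);
	if(err != noErr) {
		return NULL; // creation failed
	}
	return desc; // caller owns the reference and releases it with CFRelease()
}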
OutputAVFoundation.h · 145 lines · 2.9 KiB · Objective-C
//
// OutputAVFoundation.h
// Cog
//
// Created by Christopher Snowhill on 6/23/22.
// Copyright 2022 Christopher Snowhill. All rights reserved.
//

#import <AssertMacros.h>
#import <Cocoa/Cocoa.h>

#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>
#import <AudioUnit/AudioUnit.h>
#import <CoreAudio/AudioHardware.h>
#import <CoreAudio/CoreAudioTypes.h>

#ifdef __cplusplus
#import <atomic>
using std::atomic_long;
#else
#import <stdatomic.h>
#endif

#import "Downmix.h"

#import "VisualizationController.h"

#import "HeadphoneFilter.h"

//#define OUTPUT_LOG
#ifdef OUTPUT_LOG
#import <stdio.h>
#endif

@class OutputNode;

@class FSurroundFilter;

@interface OutputAVFoundation : NSObject {
	OutputNode *outputController;

	BOOL r8bDone;
	void *r8bstate, *r8bold;

	void *r8bvis;
	double lastVisRate;

	BOOL stopInvoked;
	BOOL stopCompleted;
	BOOL running;
	BOOL stopping;
	BOOL stopped;
	BOOL started;
	BOOL paused;
	BOOL restarted;
	BOOL commandStop;

	BOOL eqEnabled;
	BOOL eqInitialized;

	BOOL streamFormatStarted;
	BOOL streamFormatChanged;

	double secondsHdcdSustained;

	BOOL defaultdevicelistenerapplied;
	BOOL currentdevicelistenerapplied;
	BOOL devicealivelistenerapplied;
	BOOL observersapplied;
	BOOL outputdevicechanged;

	float volume;
	float eqPreamp;

	AudioDeviceID outputDeviceID;
	AudioStreamBasicDescription realStreamFormat; // stream format pre-hrtf
	AudioStreamBasicDescription streamFormat; // stream format last seen in render callback
	AudioStreamBasicDescription realNewFormat; // in case of resampler flush
	AudioStreamBasicDescription newFormat; // in case of resampler flush

	AudioStreamBasicDescription visFormat; // Mono format for vis

	uint32_t realStreamChannelConfig;
	uint32_t streamChannelConfig;
	uint32_t realNewChannelConfig;
	uint32_t newChannelConfig;

	AVSampleBufferAudioRenderer *audioRenderer;
	AVSampleBufferRenderSynchronizer *renderSynchronizer;

	CMAudioFormatDescriptionRef audioFormatDescription;

	id currentPtsObserver;
	NSLock *currentPtsLock;
	CMTime currentPts, lastPts;
	double secondsLatency;

	CMTime outputPts, trackPts, lastCheckpointPts;
	AudioTimeStamp timeStamp;

	size_t _bufferSize;

	AudioUnit _eq;

	DownmixProcessor *downmixerForVis;

	VisualizationController *visController;

	BOOL enableHrtf;
	HeadphoneFilter *hrtf;

	BOOL enableFSurround;
	FSurroundFilter *fsurround;

	float *samplePtr;
	float inputBuffer[4096 * 32]; // 4096 samples times maximum supported channel count
	float fsurroundBuffer[4096 * 6];
	float hrtfBuffer[4096 * 2];
	float eqBuffer[4096 * 32];

#ifdef OUTPUT_LOG
	FILE *_logFile;
#endif
}

- (id)initWithController:(OutputNode *)c;

- (BOOL)setup;
- (OSStatus)setOutputDeviceByID:(AudioDeviceID)deviceID;
- (BOOL)setOutputDeviceWithDeviceDict:(NSDictionary *)deviceDict;
- (void)start;
- (void)pause;
- (void)resume;
- (void)stop;

- (double)latency;

- (void)setVolume:(double)v;

- (void)setEqualizerEnabled:(BOOL)enabled;

- (void)sustainHDCD;

@end
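A minimal usage sketch of the interface declared above follows; it assumes an OutputNode instance obtained elsewhere and is not taken from Cog's sources (the volume scale in particular is an assumption).

OutputNode *node = ...; // obtained elsewhere; assumed here
OutputAVFoundation *output = [[OutputAVFoundation alloc] initWithController:node];
if([output setup]) {
	[output setVolume:100.0]; // volume scale is an assumption for this sketch
	[output start];
	NSLog(@"output latency: %f seconds", [output latency]);
	// ... later ...
	[output pause];
	[output resume];
	[output stop];
}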