iphone – AudioUnit输入样本

所以我在这里遇到一些麻烦,我的AudioUnit从iOS中的麦克风/线路输入数据.我能够将所有内容设置为我认为可以,并且它正在调用我的recordingCallback,但是我从缓冲区中获取的数据不正确.它总是返回完全相同的东西,主要是零和随机大数.有谁知道可能导致这种情况的原因.我的代码如下.

设置音频单元

OSStatus status;

// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;

// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
status = AudioComponentInstanceNew(inputComponent, &audioUnit);

// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit, 
                              kAudioOutputUnitProperty_EnableIO, 
                              kAudioUnitScope_Input, 
                              kInputBusNumber,
                              &flag, 
                              sizeof(flag));
// Disable playback IO
flag = 0;
status = AudioUnitSetProperty(audioUnit, 
                              kAudioOutputUnitProperty_EnableIO, 
                              kAudioUnitScope_Output, 
                              kOutputBusNumber,
                              &flag, 
                              sizeof(flag));

// Describe format
AudioStreamBasicDescription audioFormat;
audioFormat.mSampleRate         = 44100.00;
audioFormat.mFormatID           = kAudioFormatLinearPCM;
audioFormat.mFormatFlags        = kAudioFormatFlagsNativeFloatPacked |kAudioFormatFlagIsNonInterleaved;
audioFormat.mFramesPerPacket    = 1;
audioFormat.mChannelsPerFrame   = 1;
audioFormat.mBitsPerChannel     = 32;
audioFormat.mBytesPerPacket     = 4;
audioFormat.mBytesPerFrame      = 4;

// Apply format
status = AudioUnitSetProperty(audioUnit, 
                              kAudioUnitProperty_StreamFormat, 
                              kAudioUnitScope_Output, 
                              kInputBusNumber, 
                              &audioFormat, 
                              sizeof(audioFormat));

// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = (__bridge void*)self;
status = AudioUnitSetProperty(audioUnit, 
                              kAudioOutputUnitProperty_SetInputCallback, 
                              kAudioUnitScope_Global, 
                              kInputBusNumber, 
                              &callbackStruct, 
                              sizeof(callbackStruct));
status = AudioUnitInitialize(audioUnit);

输入回调

static OSStatus recordingCallback(void *inRefCon, 
                                  AudioUnitRenderActionFlags *ioActionFlags, 
                                  const AudioTimeStamp *inTimeStamp, 
                                  UInt32 inBusNumber, 
                                  UInt32 inNumberFrames, 
                                  AudioBufferList *ioData) {

    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mDataByteSize = 4;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mData = malloc(sizeof(float)*inNumberFrames); //
    InputAudio *input = (__bridge InputAudio*)inRefCon;

    OSStatus status;

    status = AudioUnitRender([input audioUnit], 
                             ioActionFlags, 
                             inTimeStamp, 
                             inBusNumber, 
                             inNumberFrames, 
                             &bufferList);

    float* result = (float*)&bufferList.mBuffers[0].mData;

    if (input->counter == 5) {
        for (int i = 0;i<inNumberFrames;i++) {
            printf("%f ",result[i]);
        }
    }
    input->counter++;
    return noErr;
}

任何人都遇到类似问题或在我的代码中看到明显错误的东西.在此先感谢您的帮助!

我将所有这些都从Michael Tysons Core Audio RemoteIO code开始

最佳答案 如果我没记错的话,你从回调中的音频缓冲区得到的样本不是浮点数,它们是SInt16.尝试像这样投射样本:

SInt16 *sn16AudioData= (SInt16 *)(bufferList.mBuffers[0].mData);

这些应该是最大值和最小值:

#define sn16_MAX_SAMPLE_VALUE 32767
#define sn16_MIN_SAMPLE_VALUE -32768
点赞