Problem Description
This is my code: I use it to record the iPhone's output audio with an Audio Unit and then save the output to output.caf, but the output.caf file is empty. Does anybody have an idea what I should do? The output audio file stays empty.
This is the initialization of the audio unit:
-(void) initializaeOutputUnit
{
OSStatus status;
// Describe audio component
AudioComponentDescription desc;
desc.componentType = kAudioUnitType_Output;
desc.componentSubType = kAudioUnitSubType_RemoteIO;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);
// Get audio units
status = AudioComponentInstanceNew(inputComponent, &audioUnit);
// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input,
kInputBus,
&flag,
sizeof(flag));
// Enable IO for playback
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output,
kOutputBus,
&flag,
sizeof(flag));
// Describe format
AudioStreamBasicDescription audioFormat={0};
audioFormat.mSampleRate = 44100.00;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2;
audioFormat.mBytesPerFrame = 2;
// Apply format
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Output,
kInputBus,
&audioFormat,
sizeof(audioFormat));
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_StreamFormat,
kAudioUnitScope_Input,
kOutputBus,
&audioFormat,
sizeof(audioFormat));
// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = recordingCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global,
kInputBus,
&callbackStruct,
sizeof(callbackStruct));
// Set output callback
callbackStruct.inputProc = playbackCallback;
callbackStruct.inputProcRefCon = self;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Global,
kOutputBus,
&callbackStruct,
sizeof(callbackStruct));
// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit,
kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output,
kInputBus,
&flag,
sizeof(flag));
AudioUnitInitialize(audioUnit);
AudioOutputUnitStart(audioUnit);
// Initialize the audio file
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
NSString *documentsDirectory = [paths objectAtIndex:0];
NSString *destinationFilePath = [[[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory] autorelease];
NSLog(@">>> %@", destinationFilePath);
CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);
OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);
CFRelease(destinationURL);
NSAssert(setupErr == noErr, @"Couldn't create file for writing");
setupErr = ExtAudioFileSetProperty(effectState.audioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
NSAssert(setupErr == noErr, @"Couldn't create file for format");
setupErr = ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);
NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");
}
The recording callback:
static OSStatus recordingCallback (void * inRefCon,
AudioUnitRenderActionFlags * ioActionFlags,
const AudioTimeStamp * inTimeStamp,
UInt32 inBusNumber,
UInt32 inNumberFrames,
AudioBufferList * ioData) {
NSLog(@"callback");
if (*ioActionFlags == kAudioUnitRenderAction_PostRender&&inBusNumber==0)
{
AudioBufferList *bufferList; // <- Fill this up with buffers (you will want to malloc it, as it's a dynamic-length list)
EffectState *effectState = (EffectState *)inRefCon;
AudioUnit rioUnit =[(MixerHostAudio*)inRefCon getAudioUnit];
OSStatus status;
NSLog(@"de5eal el call back ");
// BELOW I GET THE ERROR
status = AudioUnitRender( rioUnit,
ioActionFlags,
inTimeStamp,
inBusNumber,
inNumberFrames,
bufferList);
if (noErr != status) { NSLog(@"AudioUnitRender error"); return noErr;}
// Now, we have the samples we just read sitting in buffers in bufferList
ExtAudioFileWriteAsync(effectState->audioFileRef, inNumberFrames, bufferList);
}
return noErr;
}
// then stop Recording
- (void) stopRecord
{
AudioOutputUnitStop(audioUnit);
AudioUnitUninitialize(audioUnit);
}
Answer

In initializaeOutputUnit you only created your audio file:
OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);
Passing 0 (frames) and NULL (audio buffer) here just initializes the internal buffers:
setupErr = ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);
That's what's going wrong in recordingCallback:
1) ioActionFlags is always 0 and inBusNumber is always 1, because that's how you set up your callback (kInputBus = 1):
if (*ioActionFlags == kAudioUnitRenderAction_PostRender&&inBusNumber==0)
So just remove the if statement.
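If you'd rather keep a sanity check than drop the guard entirely, a minimal version that matches how the callback was actually registered could look like the following sketch (kInputBus = 1, as in the setup code above):

if (inBusNumber != kInputBus) {
    return noErr; // not the input bus; nothing to record
}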
2) From AudioUnitRender you will receive a -50 error, which is defined in CoreAudioTypes.h as kAudio_ParamError. This happens because bufferList is not defined and is NULL!
OSStatus status;
status = AudioUnitRender(THIS->mAudioUnit,
                         ioActionFlags,
                         inTimeStamp,
                         kInputBus,
                         inNumberFrames,
                         &bufferList);
if (noErr != status) {
    printf("AudioUnitRender error: %ld", status);
    return noErr;
}
You just need to define a valid AudioBuffer and pass it to AudioUnitRender. This is my working render callback:
static OSStatus recordingCallback(void *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp *inTimeStamp,
                                  UInt32 inBusNumber,
                                  UInt32 inNumberFrames,
                                  AudioBufferList *ioData) {
    double timeInSeconds = inTimeStamp->mSampleTime / kSampleRate;
    printf("\n%fs inBusNumber: %lu inNumberFrames: %lu", timeInSeconds, inBusNumber, inNumberFrames);
    //printAudioUnitRenderActionFlags(ioActionFlags);

    AudioBufferList bufferList;
    SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
    memset(&samples, 0, sizeof(samples));

    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mData = samples;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mDataByteSize = inNumberFrames * sizeof(SInt16);

    ViewController *THIS = (__bridge ViewController *)inRefCon;

    OSStatus status;
    status = AudioUnitRender(THIS->mAudioUnit,
                             ioActionFlags,
                             inTimeStamp,
                             kInputBus,
                             inNumberFrames,
                             &bufferList);
    if (noErr != status) {
        printf("AudioUnitRender error: %ld", status);
        return noErr;
    }

    // Now, we have the samples we just read sitting in buffers in bufferList
    ExtAudioFileWriteAsync(THIS->mAudioFileRef, inNumberFrames, &bufferList);

    return noErr;
}
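A note on the buffer sizing above: the client format is mono 16-bit PCM, so each frame is exactly one SInt16 and the stack array of inNumberFrames samples matches mDataByteSize. If the format ever changes, the required byte size can be derived from the stream description; the helper below is illustrative only, not part of the original answer:

// Hypothetical helper: byte size of one interleaved PCM buffer.
static UInt32 bufferByteSize(UInt32 inNumberFrames, const AudioStreamBasicDescription *asbd) {
    // frames * bytes-per-frame, e.g. 512 * 2 = 1024 bytes for mono SInt16
    return inNumberFrames * asbd->mBytesPerFrame;
}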
In stopRecord you should close the audio file with ExtAudioFileDispose:
- (void)stopRecording:(NSTimer *)theTimer {
    printf("\nstopRecording\n");
    AudioOutputUnitStop(mAudioUnit);
    AudioUnitUninitialize(mAudioUnit);
    OSStatus status = ExtAudioFileDispose(mAudioFileRef);
    printf("OSStatus(ExtAudioFileDispose): %ld\n", status);
}
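For completeness, the callback and stopRecording: above assume roughly the following declarations. This is a sketch reconstructed from the names used in the answer; the pastebin linked below contains the authoritative source:

#import <UIKit/UIKit.h>
#import <AudioToolbox/AudioToolbox.h>

#define kOutputBus  0
#define kInputBus   1       // RemoteIO bus 1 carries the hardware (mic) input
#define kSampleRate 44100.0 // matches audioFormat.mSampleRate above

@interface ViewController : UIViewController {
    @public // public so the C render callback can reach THIS->mAudioUnit
    AudioUnit       mAudioUnit;    // the RemoteIO instance
    ExtAudioFileRef mAudioFileRef; // async writer for output.caf
}
@end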
Full source code: http://pastebin.com/92Fyjaye