

Can anybody help me in recording iPhone output sound through Audio Unit?

Problem Description



This is my code: I use it to record the iPhone output audio with an Audio Unit and then save the output in output.caf, but the output.caf file is empty. Does anybody have an idea what I should do? The output audio file is empty.

This is how I initialize the audio unit:

-(void) initializaeOutputUnit
{
    OSStatus status;

    // Describe audio component
    AudioComponentDescription desc;
    desc.componentType = kAudioUnitType_Output;
    desc.componentSubType = kAudioUnitSubType_RemoteIO;
    desc.componentFlags = 0;
    desc.componentFlagsMask = 0;
    desc.componentManufacturer = kAudioUnitManufacturer_Apple;

    // Get component
    AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

    // Get audio units
    status = AudioComponentInstanceNew(inputComponent, &audioUnit);

    // Enable IO for recording
    UInt32 flag = 1;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Input, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));

    // Enable IO for playback
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_EnableIO, 
                                  kAudioUnitScope_Output, 
                                  kOutputBus,
                                  &flag, 
                                  sizeof(flag));

    // Describe format
    AudioStreamBasicDescription audioFormat={0};
    audioFormat.mSampleRate         = 44100.00;
    audioFormat.mFormatID           = kAudioFormatLinearPCM;
    audioFormat.mFormatFlags        = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    audioFormat.mFramesPerPacket    = 1;
    audioFormat.mChannelsPerFrame   = 1;
    audioFormat.mBitsPerChannel     = 16;
    audioFormat.mBytesPerPacket     = 2;
    audioFormat.mBytesPerFrame      = 2;

    // Apply format
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Output, 
                                  kInputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_StreamFormat, 
                                  kAudioUnitScope_Input, 
                                  kOutputBus, 
                                  &audioFormat, 
                                  sizeof(audioFormat));


    // Set input callback
    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = recordingCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioOutputUnitProperty_SetInputCallback, 
                                  kAudioUnitScope_Global, 
                                  kInputBus, 
                                  &callbackStruct, 
                                  sizeof(callbackStruct));

    // Set output callback
    callbackStruct.inputProc = playbackCallback;
    callbackStruct.inputProcRefCon = self;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_SetRenderCallback, 
                                  kAudioUnitScope_Global, 
                                  kOutputBus,
                                  &callbackStruct, 
                                  sizeof(callbackStruct));

    // Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
    flag = 0;
    status = AudioUnitSetProperty(audioUnit, 
                                  kAudioUnitProperty_ShouldAllocateBuffer,
                                  kAudioUnitScope_Output, 
                                  kInputBus,
                                  &flag, 
                                  sizeof(flag));


    AudioUnitInitialize(audioUnit);
    AudioOutputUnitStart(audioUnit);


    // On initialise le fichier audio
    NSArray  *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentsDirectory = [paths objectAtIndex:0];
    NSString *destinationFilePath = [[[NSString alloc] initWithFormat: @"%@/output.caf", documentsDirectory] autorelease];
    NSLog(@">>> %@", destinationFilePath);
    CFURLRef destinationURL = CFURLCreateWithFileSystemPath(kCFAllocatorDefault, (CFStringRef)destinationFilePath, kCFURLPOSIXPathStyle, false);

    OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);  
    CFRelease(destinationURL);
    NSAssert(setupErr == noErr, @"Couldn't create file for writing");

    setupErr = ExtAudioFileSetProperty(effectState.audioFileRef, kExtAudioFileProperty_ClientDataFormat, sizeof(AudioStreamBasicDescription), &audioFormat);
    NSAssert(setupErr == noErr, @"Couldn't create file for format");

    setupErr =  ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);
    NSAssert(setupErr == noErr, @"Couldn't initialize write buffers for audio file");

   }

The recording callback:

static OSStatus recordingCallback       (void *                         inRefCon,
                                         AudioUnitRenderActionFlags *      ioActionFlags,
                                         const AudioTimeStamp *            inTimeStamp,
                                         UInt32                            inBusNumber,
                                         UInt32                            inNumberFrames,
                                         AudioBufferList *                 ioData) {
    NSLog(@"callback");
   if (*ioActionFlags == kAudioUnitRenderAction_PostRender&&inBusNumber==0) 
   {
        AudioBufferList *bufferList; // <- Fill this up with buffers (you will want to malloc it, as it's a dynamic-length list)

        EffectState *effectState = (EffectState *)inRefCon;
       AudioUnit rioUnit =[(MixerHostAudio*)inRefCon getAudioUnit];

        OSStatus status;
        NSLog(@"de5eal el call back ");
        // BELOW I GET THE ERROR
        status = AudioUnitRender( rioUnit,     
                                 ioActionFlags, 
                                 inTimeStamp, 
                                 inBusNumber, 
                                 inNumberFrames, 
                                 bufferList);

        if (noErr != status) { NSLog(@"AudioUnitRender error"); return noErr;}

        // Now, we have the samples we just read sitting in buffers in bufferList
        ExtAudioFileWriteAsync(effectState->audioFileRef, inNumberFrames, bufferList);

    }
    return noErr;     
}




// then stop Recording 
- (void) stopRecord
{

    AudioOutputUnitStop(audioUnit);
    AudioUnitUninitialize(audioUnit);
}

Solution

In initializaeOutputUnit you only created your audio file:

    OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL, kAudioFileWAVEType, &audioFormat, NULL, kAudioFileFlags_EraseFile, &effectState.audioFileRef);

Passing 0 (frames) and NULL (audio buffer) just initializes the internal write buffers:

    setupErr = ExtAudioFileWriteAsync(effectState.audioFileRef, 0, NULL);
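
As an aside (an observation on the question's code, not part of the original answer): the file is named output.caf but is created with kAudioFileWAVEType, so the container actually written is WAVE. If a CAF container is really wanted to match the extension, kAudioFileCAFType would be the corresponding constant:

    // Hypothetical variant: use a CAF container to match the .caf extension.
    OSStatus setupErr = ExtAudioFileCreateWithURL(destinationURL,
                                                  kAudioFileCAFType,   // instead of kAudioFileWAVEType
                                                  &audioFormat,
                                                  NULL,
                                                  kAudioFileFlags_EraseFile,
                                                  &effectState.audioFileRef);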

That's what's going wrong in recordingCallback:

1) ioActionFlags is always 0 and inBusNumber is always 1, because that's how you set up your callback (kInputBus = 1):

    if (*ioActionFlags == kAudioUnitRenderAction_PostRender && inBusNumber == 0)

So just remove the if statement.
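
If a guard is still wanted, a minimal sketch (assuming kInputBus = 1, as defined in the question) would compare the bus number against the constant the callback was registered with:

    // Only process callbacks that fire for the input bus.
    if (inBusNumber != kInputBus) {
        return noErr;
    }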

2) From AudioUnitRender you will receive a -50 error, which is defined in CoreAudioTypes.h as kAudio_ParamError. This happens because bufferList is not defined and is NULL:

 OSStatus status; 
 status = AudioUnitRender(THIS->mAudioUnit,     
                          ioActionFlags, 
                          inTimeStamp, 
                          kInputBus, 
                          inNumberFrames, 
                          &bufferList);

 if (noErr != status) {
      printf("AudioUnitRender error: %ld", status);   
      return noErr; 
 }

You just need to define a valid AudioBufferList and pass it to AudioUnitRender. This is my working render callback:

  static OSStatus recordingCallback       (void *                      inRefCon,
                                           AudioUnitRenderActionFlags *      ioActionFlags,
                                           const AudioTimeStamp *            inTimeStamp,
                                           UInt32                            inBusNumber,
                                           UInt32                            inNumberFrames,
                                           AudioBufferList *                 ioData)  {
      double timeInSeconds = inTimeStamp->mSampleTime / kSampleRate;
      printf("
%fs inBusNumber: %lu inNumberFrames: %lu ", timeInSeconds, inBusNumber, inNumberFrames);
      //printAudioUnitRenderActionFlags(ioActionFlags);

      AudioBufferList bufferList;

      SInt16 samples[inNumberFrames]; // A large enough size to not have to worry about buffer overrun
      memset (&samples, 0, sizeof (samples));

      bufferList.mNumberBuffers = 1;
      bufferList.mBuffers[0].mData = samples;
      bufferList.mBuffers[0].mNumberChannels = 1;
      bufferList.mBuffers[0].mDataByteSize = inNumberFrames*sizeof(SInt16);

      ViewController* THIS = (__bridge ViewController *)inRefCon;

      OSStatus status;
      status = AudioUnitRender(THIS->mAudioUnit,     
                               ioActionFlags, 
                               inTimeStamp, 
                               kInputBus, 
                               inNumberFrames, 
                               &bufferList);

      if (noErr != status) {

          printf("AudioUnitRender error: %ld", status); 
          return noErr;
      }

      // Now, we have the samples we just read sitting in buffers in bufferList
      ExtAudioFileWriteAsync(THIS->mAudioFileRef, inNumberFrames, &bufferList);

      return noErr;      
 }
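
The callback above assumes mAudioUnit and mAudioFileRef are instance variables of a ViewController. Here is a minimal sketch of that interface, with names taken from the answer's snippets rather than from the original question:

    #import <UIKit/UIKit.h>
    #import <AudioToolbox/AudioToolbox.h>
    #import <AudioUnit/AudioUnit.h>

    #define kInputBus   1
    #define kSampleRate 44100.0

    @interface ViewController : UIViewController {
        @public                          // public so the C render callback can reach them via THIS->
        AudioUnit       mAudioUnit;      // the RemoteIO unit created in the init code
        ExtAudioFileRef mAudioFileRef;   // opened with ExtAudioFileCreateWithURL
    }
    @end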

In stopRecord you should close the audio file with ExtAudioFileDispose:

  - (void)stopRecording:(NSTimer*)theTimer {
      printf("
stopRecording
");
      AudioOutputUnitStop(mAudioUnit);
      AudioUnitUninitialize(mAudioUnit);

      OSStatus status = ExtAudioFileDispose(mAudioFileRef);
      printf("OSStatus(ExtAudioFileDispose): %ld
", status); 
 }
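
Since stopRecording: takes an NSTimer, it is presumably scheduled to fire after a fixed recording duration. A usage sketch (the 5-second interval is an assumption, not from the original post):

    // Stop recording automatically after 5 seconds (interval chosen arbitrarily).
    [NSTimer scheduledTimerWithTimeInterval:5.0
                                     target:self
                                   selector:@selector(stopRecording:)
                                   userInfo:nil
                                    repeats:NO];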

Full source code: http://pastebin.com/92Fyjaye
