You need to modify some of the code in myInputBufferHandler; I created an Obj-C object to adapt the C++ code from Apple's SpeakHere sample.
Feel free to use it: MIP_StreamAudioRecorder.h
//
// MIP_StreamAudioRecorder.h
//
// Created by Dennies Chang on 12/10/3.
// Copyright (c) 2012年 Dennies Chang. All rights reserved.
//
#import <Foundation/Foundation.h>
#include <AudioToolbox/AudioToolbox.h>
#include <libkern/OSAtomic.h>
#include "CAStreamBasicDescription.h"
#include "CAXException.h"
#define kNumberRecordBuffers 3
#define kBufferDurationSeconds 0.5   // half a second per buffer, as noted in startRecord
@protocol MIP_StreamAudioRecorderDelegate;
@interface MIP_StreamAudioRecorder : NSObject {
    CAStreamBasicDescription    mRecordFormat;
    AudioQueueRef               mQueue;
    AudioQueueBufferRef         mBuffers[kNumberRecordBuffers];
    BOOL                        mIsRunning;
    id <MIP_StreamAudioRecorderDelegate> delegate;
}
@property (nonatomic, assign) id <MIP_StreamAudioRecorderDelegate> delegate;
@property (nonatomic, readonly) BOOL mIsRunning;
- (void)SetupAudioFormat:(UInt32) inFormatID;
- (void)startRecord;
- (void)stopRecord;
- (int)computeRecordBufferSize:(AudioStreamBasicDescription *)format duration:(float)second;
@end
@protocol MIP_StreamAudioRecorderDelegate <NSObject>
@optional
- (void)gotAudioData:(NSData *)audioData;
@end
And the .mm file: MIP_StreamAudioRecorder.mm
//
// MIP_StreamAudioRecorder.mm
//
// Created by Dennies Chang on 12/10/3.
// Copyright (c) 2012年 Dennies Chang. All rights reserved.
//
#import "MIP_StreamAudioRecorder.h"
@implementation MIP_StreamAudioRecorder
@synthesize delegate;
@synthesize mIsRunning;
- (id)init {
    self = [super init];
    return self;
}

- (void)dealloc {
    [super dealloc];
}
- (void)SetupAudioFormat:(UInt32)inFormatID {
    memset(&mRecordFormat, 0, sizeof(mRecordFormat));

    UInt32 size = sizeof(mRecordFormat.mSampleRate);
    XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
                                          &size,
                                          &mRecordFormat.mSampleRate), "couldn't get hardware sample rate");

    size = sizeof(mRecordFormat.mChannelsPerFrame);
    XThrowIfError(AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels,
                                          &size,
                                          &mRecordFormat.mChannelsPerFrame), "couldn't get input channel count");

    mRecordFormat.mFormatID = inFormatID;
    if (inFormatID == kAudioFormatLinearPCM)
    {
        // if we want pcm, default to signed 16-bit little-endian
        mRecordFormat.mChannelsPerFrame = 1;
        mRecordFormat.mSampleRate = 8000;
        mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
        mRecordFormat.mBitsPerChannel = 16;
        mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
        mRecordFormat.mFramesPerPacket = 1;
    }
}
- (int)computeRecordBufferSize:(AudioStreamBasicDescription *)format duration:(float)second {
    int packets, frames, bytes = 0;
    try {
        frames = (int)ceil(second * format->mSampleRate);

        if (format->mBytesPerFrame > 0)
            bytes = frames * format->mBytesPerFrame;
        else {
            UInt32 maxPacketSize;
            if (format->mBytesPerPacket > 0)
                maxPacketSize = format->mBytesPerPacket;    // constant packet size
            else {
                UInt32 propertySize = sizeof(maxPacketSize);
                XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_MaximumOutputPacketSize, &maxPacketSize,
                                                    &propertySize), "couldn't get queue's maximum output packet size");
            }
            if (format->mFramesPerPacket > 0)
                packets = frames / format->mFramesPerPacket;
            else
                packets = frames;    // worst-case scenario: 1 frame in a packet
            if (packets == 0)        // sanity check
                packets = 1;
            bytes = packets * maxPacketSize;
        }
    } catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
        return 0;
    }
    return bytes;
}
/*
- (void)myInputBufferHandler:(id)inUserData AudioQueue:(AudioQueueRef) inAQ BufferRef:(AudioQueueBufferRef)inBuffer withAudioTS:(AudioTimeStamp *)inStartTime andNumPackets:(UInt32)inNumPackets andDescription:(AudioStreamPacketDescription *)inPacketDesc {
*/
void MyInputBufferHandler(void *                              inUserData,
                          AudioQueueRef                       inAQ,
                          AudioQueueBufferRef                 inBuffer,
                          const AudioTimeStamp *              inStartTime,
                          UInt32                              inNumPackets,
                          const AudioStreamPacketDescription *inPacketDesc)
{
    MIP_StreamAudioRecorder *THIS = (MIP_StreamAudioRecorder *)inUserData;
    try {
        if (inNumPackets > 0) {
            // use the delegate to handle the raw buffer
            if (THIS.delegate) {
                NSMutableData *data = [[NSMutableData alloc] init];
                if ([THIS.delegate respondsToSelector:@selector(gotAudioData:)]) {
                    [data appendBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize];
                    [THIS.delegate gotAudioData:data];
                }
                [data release];
            }
            /*
            // write packets to file
            XThrowIfError(AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize,
                                                inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData),
                          "AudioFileWritePackets failed");
            aqr->mRecordPacket += inNumPackets;
            */
        }

        // if we're not stopping, re-enqueue the buffer so that it gets filled again
        if (THIS->mIsRunning)
            XThrowIfError(AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
    } catch (CAXException e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
}
- (void)startRecord {
    int i, bufferByteSize;

    try {
        [self SetupAudioFormat:kAudioFormatLinearPCM];

        // create the queue
        XThrowIfError(AudioQueueNewInput(&mRecordFormat,
                                         MyInputBufferHandler,
                                         self /* userData */,
                                         NULL /* run loop */, NULL /* run loop mode */,
                                         0 /* flags */, &mQueue), "AudioQueueNewInput failed");

        // get the record format back from the queue's audio converter --
        // the file may require a more specific stream description than was necessary to create the encoder.
        UInt32 size = sizeof(mRecordFormat);
        XThrowIfError(AudioQueueGetProperty(mQueue, kAudioQueueProperty_StreamDescription,
                                            &mRecordFormat, &size), "couldn't get queue's format");

        // allocate and enqueue buffers
        bufferByteSize = [self computeRecordBufferSize:&mRecordFormat duration:kBufferDurationSeconds];    // enough bytes for half a second
        for (i = 0; i < kNumberRecordBuffers; ++i) {
            XThrowIfError(AudioQueueAllocateBuffer(mQueue, bufferByteSize, &mBuffers[i]),
                          "AudioQueueAllocateBuffer failed");
            XThrowIfError(AudioQueueEnqueueBuffer(mQueue, mBuffers[i], 0, NULL),
                          "AudioQueueEnqueueBuffer failed");
        }

        // start the queue
        mIsRunning = true;
        XThrowIfError(AudioQueueStart(mQueue, NULL), "AudioQueueStart failed");
    }
    catch (CAXException &e) {
        char buf[256];
        fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
    }
    catch (...) {
        fprintf(stderr, "An unknown error occurred\n");
    }
}
- (void)stopRecord {
    mIsRunning = false;    // stop the callback from re-enqueueing buffers
    XThrowIfError(AudioQueueStop(mQueue, true), "AudioQueueStop failed");
    AudioQueueDispose(mQueue, true);
}
@end
Please note: you should change the sample rate and the related condition to suit your needs; I set it up to record mono (1 channel), 16-bit, 8 kHz.
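For instance, if you wanted 44.1 kHz stereo instead, the PCM branch of SetupAudioFormat: could be adjusted roughly as follows (a hypothetical variant, not what the code above ships with):

// Hypothetical variant of the PCM branch in SetupAudioFormat: -- 44.1 kHz stereo
// instead of the 8 kHz mono hard-coded above.
mRecordFormat.mChannelsPerFrame = 2;      // stereo
mRecordFormat.mSampleRate = 44100;        // 44.1 kHz
mRecordFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
mRecordFormat.mBitsPerChannel = 16;
mRecordFormat.mBytesPerPacket = mRecordFormat.mBytesPerFrame = (mRecordFormat.mBitsPerChannel / 8) * mRecordFormat.mChannelsPerFrame;
mRecordFormat.mFramesPerPacket = 1;

startRecord already reads the format back from the queue with kAudioQueueProperty_StreamDescription, so mRecordFormat ends up describing what the queue actually delivers.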
You can get the raw data in the Obj-C code that implements MIP_StreamAudioRecorderDelegate; from there you can send the raw data over a network connection or save it to a file.
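As a minimal sketch of such a delegate (the class name MyRecorderClient, the temporary file path, and the NSFileHandle approach are my own assumptions, and it assumes the audio session is already configured for recording as in SpeakHere), writing the raw PCM to a file could look like this:

// MyRecorderClient.m -- hypothetical consumer of MIP_StreamAudioRecorder
// (manual reference counting, like the recorder itself).
#import <Foundation/Foundation.h>
#import "MIP_StreamAudioRecorder.h"

@interface MyRecorderClient : NSObject <MIP_StreamAudioRecorderDelegate> {
    MIP_StreamAudioRecorder *recorder;
    NSFileHandle *audioFile;
}
- (void)start;
- (void)stop;
@end

@implementation MyRecorderClient

- (void)start {
    // create an empty file to append the raw 16-bit / 8 kHz / mono samples to
    NSString *path = [NSTemporaryDirectory() stringByAppendingPathComponent:@"capture.pcm"];
    [[NSFileManager defaultManager] createFileAtPath:path contents:nil attributes:nil];
    audioFile = [[NSFileHandle fileHandleForWritingAtPath:path] retain];

    recorder = [[MIP_StreamAudioRecorder alloc] init];
    recorder.delegate = self;
    [recorder startRecord];
}

- (void)stop {
    [recorder stopRecord];
    [audioFile closeFile];
}

// called with one buffer of raw PCM; write it to the file here,
// or hand it to your network code instead
- (void)gotAudioData:(NSData *)audioData {
    [audioFile writeData:audioData];
}

- (void)dealloc {
    recorder.delegate = nil;
    [recorder release];
    [audioFile release];
    [super dealloc];
}

@end

Note that gotAudioData: is invoked from the audio queue's callback (the queue was created with a NULL run loop), so keep the work done there short; for network streaming you would typically hand the NSData off to your own sending queue.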
Best regards, Dennies.