title: iOS Audio Programming: Real-Time Voice Communication
date: 2016-07-14
tags: AAC Converter,Audio Queue,Audio Unit,MultipeerConnectivity
Requirement: capture raw PCM audio through the phone's mic, transcode the PCM to AAC, connect the phones with the MultipeerConnectivity framework and send the AAC data, and on the receiving end play the received AAC audio with an Audio Queue.
Audio setup
The audio is sampled at 44.1 kHz, and the PCM is transcoded to AAC at a bit rate of 64,000 bps.
1) AVAudioSession setup
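The snippets below use a few constants and error-checking helpers that the post never shows. Here is a minimal sketch of plausible definitions; the names kSmaple, kInputBus, and kRecordDataLen are taken from the code, but the values and helper bodies are assumptions:
#import <AudioToolbox/AudioToolbox.h>
#import <AVFoundation/AVFoundation.h>
#import <pthread.h>

#define kSmaple 44100.0            //sample rate (identifier spelling kept from the original code)
#define kInputBus 1                //bus 1 of the RemoteIO unit carries the mic input
#define kRecordDataLen (1024 * 32) //ring-buffer capacity; assumed to be a multiple of 1024

//abort with a readable message on any Core Audio error (assumed helper)
static void CheckError(OSStatus error, const char *operation) {
    if (error == noErr) return;
    fprintf(stderr, "Error: %s (%d)\n", operation, (int)error);
    exit(1);
}

//log AVFoundation-level NSErrors without aborting (assumed helper)
static void handleError(NSError *error) {
    if (error) NSLog(@"Audio session error: %@", error);
}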
NSError *error;
self.session = [AVAudioSession sharedInstance];
[self.session setCategory:AVAudioSessionCategoryPlayAndRecord error:&error];
handleError(error);
//listen for audio route changes
[[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(audioSessionRouteChangeHandle:) name:AVAudioSessionRouteChangeNotification object:self.session];
[self.session setPreferredIOBufferDuration:0.005 error:&error];
handleError(error);
[self.session setPreferredSampleRate:kSmaple error:&error];
handleError(error);
//[self.session overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:&error];
//handleError(error);
[self.session setActive:YES error:&error];
handleError(error);
-(void)audioSessionRouteChangeHandle:(NSNotification *)noti{
    // NSError *error;
    // [self.session overrideOutputAudioPort:AVAudioSessionPortOverrideSpeaker error:&error];
    // handleError(error);
    [self.session setActive:YES error:nil];
    if (self.startRecord) {
        CheckError(AudioOutputUnitStart(_toneUnit), "couldn't start audio unit");
    }
}
A change of the audio input/output route triggers audioSessionRouteChangeHandle. To keep audio always playing through the phone's speaker, the output must be redirected with AVAudioSessionPortOverrideSpeaker on every route change; otherwise audio plays through the earpiece. For the remaining settings, see the initialization section of iOS音頻編程之變聲處理 (iOS Audio Programming: Voice Effects).
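For reference, the reason for a route change can be read from the notification's userInfo; a small sketch of what could be added inside audioSessionRouteChangeHandle: (not in the original code):
//inspect why the route changed, e.g. headphones being unplugged
AVAudioSessionRouteChangeReason reason =
    [noti.userInfo[AVAudioSessionRouteChangeReasonKey] unsignedIntegerValue];
if (reason == AVAudioSessionRouteChangeReasonOldDeviceUnavailable) {
    //a device such as a headset was removed; without an override,
    //output falls back to the earpiece
}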
2) Audio Unit setup
AudioComponentDescription acd;
acd.componentType = kAudioUnitType_Output;
acd.componentSubType = kAudioUnitSubType_RemoteIO;
acd.componentFlags = 0;
acd.componentFlagsMask = 0;
acd.componentManufacturer = kAudioUnitManufacturer_Apple;
AudioComponent inputComponent = AudioComponentFindNext(NULL, &acd);
AudioComponentInstanceNew(inputComponent, &_toneUnit);
UInt32 enable = 1;
AudioUnitSetProperty(_toneUnit,
                     kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Input,
                     kInputBus,
                     &enable,
                     sizeof(enable));
mAudioFormat.mSampleRate = kSmaple;//sample rate
mAudioFormat.mFormatID = kAudioFormatLinearPCM;//linear PCM encoding
mAudioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
mAudioFormat.mFramesPerPacket = 1;//frames per packet
mAudioFormat.mChannelsPerFrame = 1;//1 = mono, 2 = stereo
mAudioFormat.mBitsPerChannel = 16;//bits per sample
mAudioFormat.mBytesPerFrame = mAudioFormat.mBitsPerChannel*mAudioFormat.mChannelsPerFrame/8;//bytes per frame
mAudioFormat.mBytesPerPacket = mAudioFormat.mBytesPerFrame*mAudioFormat.mFramesPerPacket;//bytes per packet = bytes per frame * frames per packet
mAudioFormat.mReserved = 0;
CheckError(AudioUnitSetProperty(_toneUnit,
                                kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Output, kInputBus,
                                &mAudioFormat, sizeof(mAudioFormat)),
           "couldn't set the remote I/O unit's input client format");
CheckError(AudioUnitSetProperty(_toneUnit,
                                kAudioOutputUnitProperty_SetInputCallback,
                                kAudioUnitScope_Output,
                                kInputBus,
                                &_inputProc, sizeof(_inputProc)),
           "couldn't set remote I/O render callback for input");
CheckError(AudioUnitInitialize(_toneUnit),
           "couldn't initialize the remote I/O unit");
For a detailed explanation of these parameters, see iOS音頻編程之變聲處理.
Input callback for captured audio data
static OSStatus inputRenderTone(
                    void *inRefCon,
                    AudioUnitRenderActionFlags *ioActionFlags,
                    const AudioTimeStamp *inTimeStamp,
                    UInt32 inBusNumber,
                    UInt32 inNumberFrames,
                    AudioBufferList *ioData)
{
    VoiceConvertHandle *THIS = (__bridge VoiceConvertHandle*)inRefCon;
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mNumberChannels = 1;
    bufferList.mBuffers[0].mData = NULL;//NULL lets AudioUnitRender supply its own buffer
    bufferList.mBuffers[0].mDataByteSize = 0;
    OSStatus status = AudioUnitRender(THIS->_toneUnit,
                                      ioActionFlags,
                                      inTimeStamp,
                                      kInputBus,
                                      inNumberFrames,
                                      &bufferList);
    //copy the captured samples into the circular queue
    NSInteger lastTimeRear = recordStruct.rear;
    for (int i = 0; i < inNumberFrames; i++) {
        SInt16 data = ((SInt16 *)bufferList.mBuffers[0].mData)[i];
        recordStruct.recordArr[recordStruct.rear] = data;
        recordStruct.rear = (recordStruct.rear+1)%kRecordDataLen;
    }
    //wake the converter thread whenever another 1024 samples have accumulated
    if ((lastTimeRear/1024 + 1) == (recordStruct.rear/1024)) {
        pthread_cond_signal(&recordCond);
    }
    return status;
}
The raw audio is stored in a circular queue, and the converter transcodes it to AAC in chunks of 1024 PCM samples, so the converter thread is woken once 1024 new samples have been collected.
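The recordStruct circular queue and its synchronization primitives are never shown in the post; a minimal sketch of what the callback above assumes (names taken from the code, layout assumed):
//single-producer (render callback) / single-consumer (converter thread) ring buffer
typedef struct {
    SInt16    recordArr[kRecordDataLen]; //circular PCM sample storage
    NSInteger front;                     //read index, advanced 1024 samples at a time
    NSInteger rear;                      //write index, advanced by the render callback
} RecordStruct;

static RecordStruct    recordStruct;
static pthread_mutex_t recordLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  recordCond = PTHREAD_COND_INITIALIZER;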
3) Audio transcoding
Initialization
AudioStreamBasicDescription sourceDes = mAudioFormat;
AudioStreamBasicDescription targetDes;
memset(&targetDes, 0, sizeof(targetDes));
targetDes.mFormatID = kAudioFormatMPEG4AAC;
targetDes.mSampleRate = kSmaple;
targetDes.mChannelsPerFrame = sourceDes.mChannelsPerFrame;
UInt32 size = sizeof(targetDes);
//let Core Audio fill in the remaining AAC format fields
CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                  0, NULL, &size, &targetDes),
           "couldn't create target data format");
//choose the software encoder
AudioClassDescription audioClassDes;
CheckError(AudioFormatGetPropertyInfo(kAudioFormatProperty_Encoders,
                                      sizeof(targetDes.mFormatID),
                                      &targetDes.mFormatID,
                                      &size), "can't get kAudioFormatProperty_Encoders");
UInt32 numEncoders = size/sizeof(AudioClassDescription);
AudioClassDescription audioClassArr[numEncoders];
CheckError(AudioFormatGetProperty(kAudioFormatProperty_Encoders,
                                  sizeof(targetDes.mFormatID),
                                  &targetDes.mFormatID,
                                  &size,
                                  audioClassArr),
           "couldn't write audioClassArr");
for (int i = 0; i < numEncoders; i++) {
    if (audioClassArr[i].mSubType == kAudioFormatMPEG4AAC
        && audioClassArr[i].mManufacturer == kAppleSoftwareAudioCodecManufacturer) {
        memcpy(&audioClassDes, &audioClassArr[i], sizeof(AudioClassDescription));
        break;
    }
}
CheckError(AudioConverterNewSpecific(&sourceDes, &targetDes, 1,
                                     &audioClassDes, &_encodeConvertRef),
           "couldn't create converter");
size = sizeof(sourceDes);
CheckError(AudioConverterGetProperty(_encodeConvertRef, kAudioConverterCurrentInputStreamDescription, &size, &sourceDes), "can't get kAudioConverterCurrentInputStreamDescription");
size = sizeof(targetDes);
CheckError(AudioConverterGetProperty(_encodeConvertRef, kAudioConverterCurrentOutputStreamDescription, &size, &targetDes), "can't get kAudioConverterCurrentOutputStreamDescription");
UInt32 bitRate = 64000;
size = sizeof(bitRate);
CheckError(AudioConverterSetProperty(_encodeConvertRef,
                                     kAudioConverterEncodeBitRate,
                                     size, &bitRate),
           "can't set converter bit rate");
[self performSelectorInBackground:@selector(convertPCMToAAC) withObject:nil];
This mainly sets the encoder's input format (PCM) and output format (AAC), selects the software encoder (a hardware codec is used by default), and sets the encoder's bit rate.
AAC encoding
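One detail worth verifying after AudioConverterNewSpecific is the negotiated output format: for AAC, mFramesPerPacket comes back as 1024, which is exactly why the capture side buffers 1024 samples at a time. A quick check one could add (not in the original post):
//after reading back kAudioConverterCurrentOutputStreamDescription:
NSLog(@"AAC frames per packet: %u", (unsigned)targetDes.mFramesPerPacket); //expect 1024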
-(void)convertPCMToAAC{
    UInt32 maxPacketSize = 0;
    UInt32 size = sizeof(maxPacketSize);
    CheckError(AudioConverterGetProperty(_encodeConvertRef,
                                         kAudioConverterPropertyMaximumOutputPacketSize,
                                         &size,
                                         &maxPacketSize),
               "can't get max packet size");
    AudioBufferList *bufferList = malloc(sizeof(AudioBufferList));
    bufferList->mNumberBuffers = 1;
    bufferList->mBuffers[0].mNumberChannels = 1;
    bufferList->mBuffers[0].mData = malloc(maxPacketSize);
    bufferList->mBuffers[0].mDataByteSize = maxPacketSize;
    for (; ; ) {
        @autoreleasepool {
            //wait until the input callback has accumulated 1024 samples
            pthread_mutex_lock(&recordLock);
            while (ABS(recordStruct.rear - recordStruct.front) < 1024) {
                pthread_cond_wait(&recordCond, &recordLock);
            }
            pthread_mutex_unlock(&recordLock);
            SInt16 *readyData = (SInt16 *)calloc(1024, sizeof(SInt16));
            memcpy(readyData, &recordStruct.recordArr[recordStruct.front], 1024*sizeof(SInt16));
            recordStruct.front = (recordStruct.front+1024)%kRecordDataLen;
            UInt32 packetSize = 1;//convert one AAC packet (1024 PCM frames) per call
            AudioStreamPacketDescription *outputPacketDescriptions = malloc(sizeof(AudioStreamPacketDescription)*packetSize);
            bufferList->mBuffers[0].mDataByteSize = maxPacketSize;
            CheckError(AudioConverterFillComplexBuffer(_encodeConvertRef,
                                                       encodeConverterComplexInputDataProc,
                                                       readyData,
                                                       &packetSize,
                                                       bufferList,
                                                       outputPacketDescriptions),
                       "AudioConverterFillComplexBuffer failed");
            free(outputPacketDescriptions);
            free(readyData);
            NSMutableData *fullData = [NSMutableData dataWithBytes:bufferList->mBuffers[0].mData length:bufferList->mBuffers[0].mDataByteSize];
            if ([self.delegate respondsToSelector:@selector(covertedData:)]) {
                [self.delegate covertedData:[fullData copy]];
            }
        }
    }
}
The newly created bufferList holds the AAC data produced by each conversion. The for loop waits for the input callback to fill a 1024-sample chunk of PCM and wake it up. The outputPacketDescriptions array describes each AAC packet produced by a conversion, though here only one packet is converted per call (determined by the packetSize passed in). Calling AudioConverterFillComplexBuffer triggers the transcoding; its second parameter is the callback that supplies the raw audio data, and when transcoding finishes the converted data is left in its fifth parameter (bufferList). The finished AAC can then be sent to the other phone.
Callback that supplies the raw data
OSStatus encodeConverterComplexInputDataProc(AudioConverterRef inAudioConverter,
                                             UInt32 *ioNumberDataPackets,
                                             AudioBufferList *ioData,
                                             AudioStreamPacketDescription **outDataPacketDescription,
                                             void *inUserData)
{
    //hand the converter the 1024 samples passed in via inUserData; the PCM input
    //format has one frame per packet, so 1024 packets = 1024 samples = 2048 bytes
    ioData->mBuffers[0].mData = inUserData;
    ioData->mBuffers[0].mNumberChannels = 1;
    ioData->mBuffers[0].mDataByteSize = 1024*2;
    *ioNumberDataPackets = 1024;
    return 0;
}
4) Playing the AAC audio with an Audio Queue
Audio Queue basics
Audio data lives in the audio queue as individual AudioQueueBuffers, and the Audio Queue plays from the data they provide. When an AudioQueueBuffer has been used up, the Audio Queue invokes its callback, asking the user to refill that AudioQueueBuffer with data and add it back to the Audio Queue. Repeating this cycle yields uninterrupted playback.
Audio Queue initialization
CheckError(AudioQueueNewOutput(&targetDes,
                               fillBufCallback,
                               (__bridge void *)self,
                               NULL,
                               NULL,
                               0,
                               &(_playQueue)),
           "can't create audio queue");
CheckError(AudioQueueSetParameter(_playQueue,
                                  kAudioQueueParam_Volume, 1.0),
           "can't set audio queue gain");
for (int i = 0; i < 3; i++) {
    AudioQueueBufferRef buffer;
    //each buffer must hold 8 AAC packets; at 64 kbps a packet averages ~190 bytes
    //and can be larger, so allocate with headroom
    CheckError(AudioQueueAllocateBuffer(_playQueue, 4096, &buffer), "can't allocate buffer");
    BNRAudioQueueBuffer *buffObj = [[BNRAudioQueueBuffer alloc] init];
    buffObj.buffer = buffer;
    [_buffers addObject:buffObj];
    [_reusableBuffers addObject:buffObj];
}
[self performSelectorInBackground:@selector(playData) withObject:nil];
Playing audio data with the Audio Queue
-(void)playData{
    for (; ; ) {
        @autoreleasepool {
            NSMutableData *data = [[NSMutableData alloc] init];
            pthread_mutex_lock(&playLock);
            while (self.aacArry.count % 8 != 0 || self.aacArry.count == 0) {
                pthread_cond_wait(&playCond, &playLock);
            }
            AudioStreamPacketDescription *paks = calloc(8, sizeof(AudioStreamPacketDescription));
            SInt64 offset = 0;
            for (int i = 0; i < 8; i++) {//combine 8 AAC packets into one AudioQueueBuffer
                BNRAudioData *audio = [self.aacArry firstObject];
                [data appendData:audio.data];
                paks[i].mStartOffset = offset;//offset of this packet within the combined buffer
                paks[i].mDataByteSize = audio.packetDescription.mDataByteSize;
                offset += paks[i].mDataByteSize;
                [self.aacArry removeObjectAtIndex:0];
            }
            pthread_mutex_unlock(&playLock);
            pthread_mutex_lock(&buffLock);
            if (_reusableBuffers.count == 0) {
                //start playback once the first buffers have been queued
                static dispatch_once_t onceToken;
                dispatch_once(&onceToken, ^{
                    AudioQueueStart(_playQueue, NULL);
                });
                pthread_cond_wait(&buffcond, &buffLock);
            }
            BNRAudioQueueBuffer *bufferObj = [_reusableBuffers firstObject];
            [_reusableBuffers removeObject:bufferObj];
            pthread_mutex_unlock(&buffLock);
            memcpy(bufferObj.buffer->mAudioData, [data bytes], [data length]);
            bufferObj.buffer->mAudioDataByteSize = (UInt32)[data length];
            CheckError(AudioQueueEnqueueBuffer(_playQueue, bufferObj.buffer, 8, paks), "can't enqueue");
            free(paks);
        }
    }
}
static void fillBufCallback(void *inUserData,
                            AudioQueueRef inAQ,
                            AudioQueueBufferRef buffer){
    VoiceConvertHandle *THIS = (__bridge VoiceConvertHandle*)inUserData;
    //return the consumed buffer to the reusable pool and wake playData
    for (int i = 0; i < THIS->_buffers.count; ++i) {
        if (buffer == [THIS->_buffers[i] buffer]) {
            pthread_mutex_lock(&buffLock);
            [THIS->_reusableBuffers addObject:THIS->_buffers[i]];
            pthread_mutex_unlock(&buffLock);
            pthread_cond_signal(&buffcond);
            break;
        }
    }
}
playData waits for incoming aacArry data. Note that a single AAC packet (converted from 1024 PCM samples) placed into an AudioQueueBuffer is not enough for the Audio Queue to play audio, so 8 AAC packets are put into each AudioQueueBuffer. fillBufCallback is the callback the Audio Queue invokes after it finishes playing an AudioQueueBuffer; there it notifies playData that the consumed AudioQueueBufferRef can be refilled. Once refilled, AudioQueueEnqueueBuffer adds it back into the Audio Queue, and the three AudioQueueBufferRefs are reused continuously.
Real-time voice communication
The original plan was to send the data over Bluetooth, but a hand-rolled Bluetooth transfer mechanism could not keep up with the rate of converted AAC data. The MultipeerConnectivity framework can communicate over either Bluetooth or Wi-Fi, choosing the transport automatically at the lower layer. When Wi-Fi is turned off on both phones they fall back to Bluetooth; in that case the transmitted voice is audible right after the call is established but then cuts out, a problem that does not occur when transmitting over Wi-Fi.
1) MultipeerConnectivity basics
MCNearbyServiceAdvertiser broadcasts a service and receives invitations from the MCNearbyServiceBrowser side; MCSession sends and receives data and manages connection state. The flow for establishing a connection and communicating is: MCNearbyServiceAdvertiser advertises the service; when MCNearbyServiceBrowser discovers it, the browser invites the MCPeerID behind that service into its own (browser-side) MCSession; MCNearbyServiceAdvertiser receives the invitation and accepts it, at the same time adding the browser side's MCPeerID into its own (advertiser-side) MCSession. After that, both sides can send and receive data through their respective MCSessions.
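A minimal sketch of that handshake, assuming property names like self.mcSession and self.advertiser and a hypothetical service type (the post does not show its connection code):
static NSString * const kServiceType = @"voice-chat"; //hypothetical; must match on both ends

//advertiser side: create a session and broadcast the service
self.peerID = [[MCPeerID alloc] initWithDisplayName:[UIDevice currentDevice].name];
self.mcSession = [[MCSession alloc] initWithPeer:self.peerID];
self.mcSession.delegate = self;
self.advertiser = [[MCNearbyServiceAdvertiser alloc] initWithPeer:self.peerID
                                                    discoveryInfo:nil
                                                      serviceType:kServiceType];
self.advertiser.delegate = self;
[self.advertiser startAdvertisingPeer];

//MCNearbyServiceAdvertiserDelegate: accept the browser's invitation, which
//joins the two peers' MCSessions together
-(void)advertiser:(MCNearbyServiceAdvertiser *)advertiser
didReceiveInvitationFromPeer:(MCPeerID *)peerID
      withContext:(NSData *)context
invitationHandler:(void (^)(BOOL accept, MCSession *session))invitationHandler{
    invitationHandler(YES, self.mcSession);
}

//MCNearbyServiceBrowserDelegate (browser side, with its own MCSession and an
//MCNearbyServiceBrowser created with the same kServiceType): on finding the
//advertised peer, invite it into this side's session
-(void)browser:(MCNearbyServiceBrowser *)browser foundPeer:(MCPeerID *)peerID
withDiscoveryInfo:(NSDictionary *)info{
    [browser invitePeer:peerID toSession:self.mcSession withContext:nil timeout:30];
}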
2) Each end sends its own transcoded AAC data, receives the AAC data sent by the peer, and feeds it to the Audio Queue for playback.
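A minimal sketch of this exchange, assuming the connected MCSession from above, the playLock/playCond primitives used by playData, and that BNRAudioData exposes the data and packetDescription properties the player reads (the post does not show this code):
//sender side: the converter's delegate callback forwards each AAC packet to the peer
-(void)covertedData:(NSData *)data{
    NSError *error;
    [self.mcSession sendData:data
                     toPeers:self.mcSession.connectedPeers
                    withMode:MCSessionSendDataUnreliable //low latency matters more than delivery
                       error:&error];
}

//receiver side (MCSessionDelegate): wrap each received AAC packet and hand it
//to the player, waking playData once another 8 packets are available
-(void)session:(MCSession *)session didReceiveData:(NSData *)data fromPeer:(MCPeerID *)peerID{
    BNRAudioData *audio = [[BNRAudioData alloc] init];
    audio.data = data;
    AudioStreamPacketDescription desc = {0, 0, (UInt32)data.length};
    audio.packetDescription = desc; //one description per packet; offsets are rebuilt in playData
    pthread_mutex_lock(&playLock);
    [self.aacArry addObject:audio];
    if (self.aacArry.count % 8 == 0) {
        pthread_cond_signal(&playCond);
    }
    pthread_mutex_unlock(&playLock);
}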