最近有需求从蓝牙接收音频数据进行播放,以前没做过,就各种百度、谷歌,看官方文档。顺带说一下,这里使用的是 Audio Queue Services,只能用于 PCM 数据,其他压缩的音频文件要配合 AudioFileStream 或者 AudioFile 解析后播放。
在我的这篇文章中有一些音频的介绍(主要是使用 Speex 这个库),适合萌新观看。
注意点:
上述的解决办法是往播放队列中插入空数据(感觉效果很差),或者是先暂停,等数据来了再播放。
具体可以看「码农人生」这个博客,讲得非常详细。
#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

/// Plays raw PCM audio data through an Audio Queue output.
/// Feed chunks of PCM bytes with -playWithData:; the player copies them into
/// a small pool of reusable queue buffers and plays them back.
@interface AudioQueuePlay : NSObject

/// Enqueue one chunk of PCM data for playback.
- (void)playWithData: (NSData *)data;

/// Reset the underlying audio queue, dropping any pending buffers.
- (void)resetPlay;

@end
#import "AudioQueuePlay.h"

#define MIN_SIZE_PER_FRAME 2000   // capacity of each queue buffer, in bytes
#define QUEUE_BUFFER_SIZE 3       // number of reusable queue buffers

// Forward declaration: the callback is defined after the methods that
// reference it; without this the compiler sees an implicit declaration.
static void AudioPlayerAQInputCallback(void *inUserData, AudioQueueRef audioQueueRef, AudioQueueBufferRef audioQueueBufferRef);

@interface AudioQueuePlay() {
    AudioQueueRef audioQueue;                                  // playback queue
    AudioStreamBasicDescription _audioDescription;             // PCM format description
    AudioQueueBufferRef audioQueueBuffers[QUEUE_BUFFER_SIZE];  // reusable buffer pool
    BOOL audioQueueBufferUsed[QUEUE_BUFFER_SIZE];              // in-use flag per buffer
    NSLock *sysnLock;                                          // guards buffer bookkeeping
    NSMutableData *tempData;                                   // scratch copy of incoming data
    OSStatus osState;                                          // last CoreAudio result code
}
@end

@implementation AudioQueuePlay

- (instancetype)init {
    self = [super init];
    if (self) {
        sysnLock = [[NSLock alloc] init];

        // Describe the incoming PCM stream: 8 kHz, mono, 16-bit signed
        // integer, packed, one frame per packet (uncompressed PCM).
        if (_audioDescription.mSampleRate <= 0) {
            _audioDescription.mSampleRate = 8000.0;               // sample rate
            _audioDescription.mFormatID = kAudioFormatLinearPCM;
            // Byte layout: signed integer samples, packed.
            _audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
            _audioDescription.mChannelsPerFrame = 1;              // 1 = mono, 2 = stereo
            _audioDescription.mFramesPerPacket = 1;               // frames per packet
            _audioDescription.mBitsPerChannel = 16;               // bits per sample
            _audioDescription.mBytesPerFrame = (_audioDescription.mBitsPerChannel / 8) * _audioDescription.mChannelsPerFrame;
            _audioDescription.mBytesPerPacket = _audioDescription.mBytesPerFrame * _audioDescription.mFramesPerPacket;
        }

        // Create the output queue; the callback runs on the queue's internal
        // thread (no run loop supplied). NULL, not nil, for the C pointers.
        osState = AudioQueueNewOutput(&_audioDescription, AudioPlayerAQInputCallback,
                                      (__bridge void * _Nullable)(self), NULL, NULL, 0, &audioQueue);
        if (osState != noErr) {
            // Without a queue nothing below can work; bail out early.
            printf("AudioQueueNewOutput Error %d", (int)osState);
            return self;
        }

        // Full volume.
        AudioQueueSetParameter(audioQueue, kAudioQueueParam_Volume, 1.0);

        // Allocate the fixed pool of reusable buffers.
        for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
            audioQueueBufferUsed[i] = false;
            osState = AudioQueueAllocateBuffer(audioQueue, MIN_SIZE_PER_FRAME, &audioQueueBuffers[i]);
            printf("第 %d 个AudioQueueAllocateBuffer 初始化结果 %d (0表示成功)", i + 1, osState);
        }

        osState = AudioQueueStart(audioQueue, NULL);
        if (osState != noErr) {
            printf("AudioQueueStart Error");
        }
    }
    return self;
}

// Drop any enqueued-but-unplayed buffers and reset the queue's decoder state.
- (void)resetPlay {
    if (audioQueue != NULL) {
        AudioQueueReset(audioQueue);
    }
}

// Playback
// Enqueue one chunk of PCM data for playback.
// Spin-waits until one of the pooled buffers is free (the AQ callback frees
// them), copies the data into it and hands it to the audio queue.
// NOTE: data longer than MIN_SIZE_PER_FRAME is truncated — previously it was
// memcpy'd unclamped into a MIN_SIZE_PER_FRAME-byte buffer (heap overflow).
- (void)playWithData:(NSData *)data {
    [sysnLock lock];

    tempData = [NSMutableData new];
    [tempData appendData:data];

    NSUInteger len = tempData.length;
    if (len == 0) {
        // Nothing to play.
        [sysnLock unlock];
        return;
    }
    if (len > MIN_SIZE_PER_FRAME) {
        // Clamp to buffer capacity; excess bytes are dropped.
        len = MIN_SIZE_PER_FRAME;
    }

    // Find a free buffer, spinning until the playback callback releases one.
    int i = 0;
    while (true) {
        if (!audioQueueBufferUsed[i]) {
            audioQueueBufferUsed[i] = true;
            break;
        } else {
            i++;
            if (i >= QUEUE_BUFFER_SIZE) {
                i = 0;
            }
        }
    }

    audioQueueBuffers[i]->mAudioDataByteSize = (unsigned int)len;
    // Copy straight from the NSData backing store — the previous malloc'd
    // intermediate copy was never freed (leak) and served no purpose.
    memcpy(audioQueueBuffers[i]->mAudioData, tempData.bytes, len);
    AudioQueueEnqueueBuffer(audioQueue, audioQueueBuffers[i], 0, NULL);
    printf("本次播放数据大小: %lu", (unsigned long)len);

    [sysnLock unlock];
}

// ************************** Callback **********************************
// Invoked on the audio queue's thread when a buffer finishes playing;
// marks that buffer reusable.
static void AudioPlayerAQInputCallback(void *inUserData, AudioQueueRef audioQueueRef, AudioQueueBufferRef audioQueueBufferRef) {
    AudioQueuePlay *player = (__bridge AudioQueuePlay *)inUserData;
    [player resetBufferState:audioQueueRef and:audioQueueBufferRef];
}

// Mark the finished buffer as free so playWithData: can reuse it.
- (void)resetBufferState:(AudioQueueRef)audioQueueRef and:(AudioQueueBufferRef)audioQueueBufferRef {
    for (int i = 0; i < QUEUE_BUFFER_SIZE; i++) {
        if (audioQueueBufferRef == audioQueueBuffers[i]) {
            audioQueueBufferUsed[i] = false;
        }
    }
}

// ************************** Teardown **********************************
- (void)dealloc {
    if (audioQueue != NULL) {
        AudioQueueStop(audioQueue, true);
        // Previously missing: dispose the queue to release it and its
        // allocated buffers (CoreAudio resources are not ARC-managed).
        AudioQueueDispose(audioQueue, true);
    }
    audioQueue = NULL;
    sysnLock = nil;
}

@end
import UIKit
import AudioToolbox

class PCMPlayerConstant: NSObject {
    /// Number of reusable queue buffers.
    static let BUFF_NUM = 3
    /// Capacity of each queue buffer, in bytes.
    static let ONCE_PLAY_SIZE: UInt32 = 2000
}

/// Plays raw PCM data (8 kHz, mono, 16-bit signed integer) through
/// Audio Queue Services. Swift port of the Objective-C AudioQueuePlay.
class PCMPlayer: NSObject {

    fileprivate var audioQueueRef: AudioQueueRef?
    fileprivate var audioQueueBuffer: [AudioQueueBufferRef?]!
    fileprivate var audioDescription: AudioStreamBasicDescription!
    fileprivate var audioQueueBufferUsed: [Bool]!   // in-use flag per buffer
    fileprivate var syncLock: NSLock!               // guards buffer bookkeeping
    fileprivate var playData: NSMutableData!        // bytes accumulated until next enqueue
    fileprivate var oSStatus: OSStatus!             // last CoreAudio result code

    override init() {
        super.init()
        self.playData = NSMutableData()
        self.syncLock = NSLock()
        oSStatus = OSStatus()
        audioQueueBufferUsed = []
        self.audioQueueBuffer = []
        audioDescription = AudioStreamBasicDescription()

        // Describe the incoming PCM stream: 8 kHz, mono, 16-bit signed
        // integer, packed, one frame per packet (uncompressed PCM).
        audioDescription.mSampleRate = 8000.0                 // sample rate
        audioDescription.mFormatID = kAudioFormatLinearPCM
        // Byte layout: signed integer samples, packed.
        audioDescription.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
        audioDescription.mChannelsPerFrame = 1                // 1 = mono, 2 = stereo
        audioDescription.mFramesPerPacket = 1                 // frames per packet
        audioDescription.mBitsPerChannel = 16                 // bits per sample
        audioDescription.mBytesPerFrame = (audioDescription.mBitsPerChannel / 8) * audioDescription.mChannelsPerFrame
        audioDescription.mBytesPerPacket = audioDescription.mBytesPerFrame * audioDescription.mFramesPerPacket

        self.initPlay()
    }

    /// Create the output queue, allocate the buffer pool and start playback.
    fileprivate func initPlay() -> Void {
        let selfPointer = unsafeBitCast(self, to: UnsafeMutableRawPointer.self)

        // Create the output queue; the callback is delivered on the current run loop.
        oSStatus = AudioQueueNewOutput(&self.audioDescription!, MyAudioQueueOutputCallback, selfPointer, CFRunLoopGetCurrent(), nil, 0, &self.audioQueueRef)
        if oSStatus != noErr {
            print("AudioQueueNewOutput Error")
            return
        }

        // Full volume.
        AudioQueueSetParameter(self.audioQueueRef!, kAudioQueueParam_Volume, 1.0)

        // Allocate the fixed pool of reusable buffers.
        for index in 0..<PCMPlayerConstant.BUFF_NUM {
            var audioBuffer: AudioQueueBufferRef? = nil
            oSStatus = AudioQueueAllocateBuffer(self.audioQueueRef!, PCMPlayerConstant.ONCE_PLAY_SIZE, &audioBuffer)
            if oSStatus != noErr {
                print("AudioQueueAllocateBuffer Error \(index)")
                return
            } else {
                self.audioQueueBuffer.append(audioBuffer)
                // Mark as free.
                self.audioQueueBufferUsed.append(false)
                print("第 \(index + 1) 个AudioQueueAllocateBuffer 初始化结果 \(oSStatus) (0表示成功)")
            }
        }

        AudioQueueStart(self.audioQueueRef!, nil)
    }

    /// Append `data` to the pending buffer; once more than 980 bytes have
    /// accumulated (threshold is tunable), copy them into a free queue
    /// buffer and enqueue it for playback.
    func playWithData(data: Data) -> Void {
        syncLock.lock()

        playData.append(data)
        if playData.length > 980 {
            // Clamp to the buffer capacity — previously the full length was
            // copied unclamped into a ONCE_PLAY_SIZE-byte buffer (heap
            // overflow). Excess bytes are dropped.
            let playDataLength = min(playData.length, Int(PCMPlayerConstant.ONCE_PLAY_SIZE))

            // Find a free buffer, spinning until the callback releases one.
            var i = 0
            while true {
                if !self.audioQueueBufferUsed[i] {
                    // Mark as in use.
                    self.audioQueueBufferUsed[i] = true
                    break
                } else {
                    i += 1
                    // Wrap around when we reach the end of the pool.
                    if i >= PCMPlayerConstant.BUFF_NUM {
                        i = 0
                    }
                }
            }

            let p = self.audioQueueBuffer[i]
            let selfPointer = unsafeBitCast(self, to: UnsafeMutableRawPointer.self)
            p?.pointee.mUserData = selfPointer
            p?.pointee.mAudioDataByteSize = UInt32(playDataLength)
            p?.pointee.mAudioData.advanced(by: 0).copyBytes(from: playData.bytes, count: playDataLength)

            // Hand the buffer to the audio queue.
            AudioQueueEnqueueBuffer(self.audioQueueRef!, self.audioQueueBuffer[i]!, 0, nil)

            playData = NSMutableData()
            print("play length \(playDataLength)")
        }

        syncLock.unlock()
    }
}

/// Buffer-finished callback: marks the completed buffer as reusable.
func MyAudioQueueOutputCallback(clientData: UnsafeMutableRawPointer?, AQ: AudioQueueRef, buffer: AudioQueueBufferRef) {
    let my = Unmanaged<PCMPlayer>.fromOpaque(UnsafeRawPointer(clientData)!).takeUnretainedValue()
    for index in 0..<PCMPlayerConstant.BUFF_NUM {
        if my.audioQueueBuffer[index] == buffer {
            // Release the buffer back to the pool.
            my.audioQueueBufferUsed[index] = false
        }
    }
}