Project address
https://github.com/979451341/FFmpegOpenslESandroid
This post covers decoding an mp3 with FFmpeg, handing the PCM data to OpenSL ES for playback, and supporting pause.
1. Create the engine
```c
slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);                      // create the engine object
(*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);                   // realize the engine object
(*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);  // fetch the SLEngineItf from the engine
```
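Note the `(*engineObject)->...` calling convention: OpenSL ES objects are COM-style tables of function pointers, so every method is invoked through the dereferenced handle, with the object itself passed back as the first argument.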
2. Create the output mix
```c
(*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 0, 0, 0);  // create the output-mix object from the engine
(*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);             // realize the output-mix object
SLresult sLresult = (*outputMixObject)->GetInterface(outputMixObject, SL_IID_ENVIRONMENTALREVERB,
                                                     &outputMixEnvironmentalReverb);  // fetch the reverb interface
// apply the reverb settings if the interface is available
if (SL_RESULT_SUCCESS == sLresult) {
    (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(outputMixEnvironmentalReverb, &settings);
}
```
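The snippets throughout this post reference a number of file-scope handles without showing their declarations. A plausible declaration block, assembled from the names used in the snippets (the exact layout in the repo may differ), would be:

```c
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>
#include "libavformat/avformat.h"
#include "libswresample/swresample.h"

// OpenSL ES handles
SLObjectItf engineObject = NULL;                                 // engine object
SLEngineItf engineEngine = NULL;                                 // engine interface
SLObjectItf outputMixObject = NULL;                              // output mix object
SLEnvironmentalReverbItf outputMixEnvironmentalReverb = NULL;    // reverb interface
SLEnvironmentalReverbSettings settings = SL_I3DL2_ENVIRONMENT_PRESET_DEFAULT;
SLObjectItf audioplayer = NULL;                                  // audio player object
SLPlayItf slPlayItf = NULL;                                      // play interface
SLAndroidSimpleBufferQueueItf slBufferQueueItf = NULL;           // buffer queue interface

// FFmpeg decoding state
AVFormatContext *pFormatCtx = NULL;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodex = NULL;
AVPacket *packet = NULL;
AVFrame *frame = NULL;
SwrContext *swrContext = NULL;
uint8_t *out_buffer = NULL;   // resampled pcm destination
int audio_stream_idx = -1;    // index of the audio stream
int out_channer_nb;           // output channel count
int rate, channel;            // stream sample rate and channel count

// scratch state for the buffer-queue callback
void *buffer = NULL;
size_t buffersize = 0;
```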
3. Prepare FFmpeg to decode the mp3
```c
av_register_all();
char *input = "/storage/emulated/0/pauseRecordDemo/video/a.mp3";
pFormatCtx = avformat_alloc_context();
LOGE("path %s", input);
LOGE("ctx %p", pFormatCtx);
int error;
char buf[1024] = "";
// open the file and read its headers (demuxing)
if ((error = avformat_open_input(&pFormatCtx, input, NULL, NULL)) < 0) {
    av_strerror(error, buf, 1024);
    LOGE("Couldn't open file %s: %d(%s)", input, error, buf);
    LOGE("failed to open the file");
}
// read the stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
    LOGE("%s", "failed to get stream info");
    return -1;
}
// locate the audio stream
for (int i = 0; i < pFormatCtx->nb_streams; ++i) {
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        LOGE("found audio stream, codec_type %d", pFormatCtx->streams[i]->codec->codec_type);
        audio_stream_idx = i;
        break;
    }
}

// get the mp3's audio decoder
pCodecCtx = pFormatCtx->streams[audio_stream_idx]->codec;
LOGE("decoder context %p", pCodecCtx);
pCodex = avcodec_find_decoder(pCodecCtx->codec_id);
LOGE("decoder %p", pCodex);
if (avcodec_open2(pCodecCtx, pCodex, NULL) < 0) {
    LOGE("failed to open the decoder");
}
packet = (AVPacket *) av_malloc(sizeof(AVPacket));
// av_init_packet(packet);

// the decoded audio frame
frame = av_frame_alloc();
// SwrContext converts the mp3's native sample format to pcm
swrContext = swr_alloc();
int length = 0;
int got_frame;
// 44100 * 2 bytes of output buffer
out_buffer = (uint8_t *) av_malloc(44100 * 2);
uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
// output sample format: 16-bit
enum AVSampleFormat out_formart = AV_SAMPLE_FMT_S16;
// the output sample rate must match the input
int out_sample_rate = pCodecCtx->sample_rate;
swr_alloc_set_opts(swrContext, out_ch_layout, out_formart, out_sample_rate,
                   pCodecCtx->channel_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate,
                   0, NULL);
swr_init(swrContext);
// channel count for the chosen layout (2)
out_channer_nb = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
rate = pCodecCtx->sample_rate;
channel = pCodecCtx->channels;
```
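The next step calls a helper named createFFmpeg(&rate, &channels), so presumably the setup above is wrapped in a function that reports the stream's parameters back through output pointers. A minimal sketch of that wrapper (the signature is inferred from the call site, not taken from the repo):

```c
// Sketch only: wraps the step-3 setup and reports the stream parameters
// back to the caller, which needs them to fill in SLDataFormat_PCM.
int createFFmpeg(int *rate, int *channel) {
    // ... step-3 setup: open the input, find the audio stream,
    //     open the decoder, allocate packet/frame, configure SwrContext ...
    *rate = pCodecCtx->sample_rate;  // stream sample rate
    *channel = pCodecCtx->channels;  // stream channel count
    return 0;
}
```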
4. Set up the buffer queue
```c
int rate;
int channels;
createFFmpeg(&rate, &channels);
LOGE("RATE %d", rate);
LOGE("channels %d", channels);
/*
typedef struct SLDataLocator_AndroidBufferQueue_ {
    SLuint32 locatorType; // locator type (buffer queue)
    SLuint32 numBuffers;  // number of buffers
} SLDataLocator_AndroidBufferQueue;
*/
SLDataLocator_AndroidBufferQueue android_queue = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
/*
typedef struct SLDataFormat_PCM_ {
    SLuint32 formatType;    // pcm
    SLuint32 numChannels;   // channel count
    SLuint32 samplesPerSec; // sample rate (in milliHertz)
    SLuint32 bitsPerSample; // bits per sample
    SLuint32 containerSize; // container size
    SLuint32 channelMask;   // speaker layout
    SLuint32 endianness;    // byte order
} SLDataFormat_PCM;
*/
SLDataFormat_PCM pcm = {SL_DATAFORMAT_PCM, channels, rate * 1000, // samplesPerSec is in milliHertz
                        SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
                        SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN};
/*
typedef struct SLDataSource_ {
    void *pLocator; // the buffer queue
    void *pFormat;  // the data format
} SLDataSource;
*/
SLDataSource dataSource = {&android_queue, &pcm};
SLDataLocator_OutputMix slDataLocator_outputMix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
SLDataSink slDataSink = {&slDataLocator_outputMix, NULL};
const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND, SL_IID_VOLUME};
const SLboolean req[3] = {SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE};
/*
SLresult (*CreateAudioPlayer) (
    SLEngineItf self,
    SLObjectItf *pPlayer,
    SLDataSource *pAudioSrc, // data source
    SLDataSink *pAudioSnk,   // sink, i.e. the output mix
    SLuint32 numInterfaces,
    const SLInterfaceID *pInterfaceIds,
    const SLboolean *pInterfaceRequired
);
*/
LOGE("about to create the player");
(*engineEngine)->CreateAudioPlayer(engineEngine, &audioplayer, &dataSource, &slDataSink, 3, ids, req);
(*audioplayer)->Realize(audioplayer, SL_BOOLEAN_FALSE);
LOGE("player realized");
(*audioplayer)->GetInterface(audioplayer, SL_IID_PLAY, &slPlayItf);  // fetch the play interface
// fetch the buffer queue; playback is driven by the data we enqueue into it
(*audioplayer)->GetInterface(audioplayer, SL_IID_BUFFERQUEUE, &slBufferQueueItf);
// register the callback that refills the queue
(*slBufferQueueItf)->RegisterCallback(slBufferQueueItf, getQueueCallBack, NULL);
```
Finally, the callback has to be invoked once by hand to prime the queue; this callback is what supplies the data FFmpeg decodes.
```c
// start playback by priming the queue
getQueueCallBack(slBufferQueueItf, NULL);
```
Let's look at what this callback does: it pushes data into the queue with Enqueue, and that data comes from getPcm.
```c
void getQueueCallBack(SLAndroidSimpleBufferQueueItf slBufferQueueItf, void *context) {
    buffersize = 0;
    getPcm(&buffer, &buffersize);
    if (buffer != NULL && buffersize != 0) {
        // enqueue the decoded data; when it has been consumed, this callback fires again
        (*slBufferQueueItf)->Enqueue(slBufferQueueItf, buffer, buffersize);
    }
}
```
getPcm is where FFmpeg decodes the mp3 into PCM. Each time a packet is decoded, it breaks out of the read loop and hands the data up to be enqueued; once the queue has drained, getQueueCallBack runs again and pulls the next chunk of decoded PCM.
```c
int getPcm(void **pcm, size_t *pcm_size) {
    int got_frame;
    while (av_read_frame(pFormatCtx, packet) >= 0) {
        if (packet->stream_index == audio_stream_idx) {
            // decode the mp3 packet into a pcm frame
            avcodec_decode_audio4(pCodecCtx, frame, &got_frame, packet);
            if (got_frame) {
                LOGE("decoded a frame");
                // resample into the 16-bit stereo out_buffer
                swr_convert(swrContext, &out_buffer, 44100 * 2,
                            (const uint8_t **) frame->data, frame->nb_samples);
                // size in bytes of the converted samples
                int size = av_samples_get_buffer_size(NULL, out_channer_nb, frame->nb_samples,
                                                      AV_SAMPLE_FMT_S16, 1);
                *pcm = out_buffer;
                *pcm_size = size;
                break;  // hand one packet's worth of pcm back to the caller
            }
        }
    }
    return 0;
}
```
5. Play the audio
```c
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PLAYING);
```
6. Pause the audio
```c
(*slPlayItf)->SetPlayState(slPlayItf, SL_PLAYSTATE_PAUSED);
```
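Play and pause are just two states of the same SLPlayItf, so exposing the toggle to Java is a one-liner per direction. A minimal sketch, assuming a hypothetical JNI entry point (the class and method names are illustrative, not from the repo):

```c
#include <jni.h>
#include <SLES/OpenSLES.h>

// Hypothetical JNI toggle; slPlayItf is the play interface obtained in step 4.
JNIEXPORT void JNICALL
Java_com_example_player_MusicPlayer_setPaused(JNIEnv *env, jobject thiz, jboolean paused) {
    if (slPlayItf != NULL) {
        (*slPlayItf)->SetPlayState(slPlayItf,
                                   paused ? SL_PLAYSTATE_PAUSED : SL_PLAYSTATE_PLAYING);
    }
}
```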
7. Release resources
First destroy the OpenSL ES objects:
```c
if (audioplayer != NULL) {
    (*audioplayer)->Destroy(audioplayer);
    audioplayer = NULL;
    slBufferQueueItf = NULL;
    slPlayItf = NULL;
}
if (outputMixObject != NULL) {
    (*outputMixObject)->Destroy(outputMixObject);
    outputMixObject = NULL;
    outputMixEnvironmentalReverb = NULL;
}
if (engineObject != NULL) {
    (*engineObject)->Destroy(engineObject);
    engineObject = NULL;
    engineEngine = NULL;
}
```
Then free the resources FFmpeg holds; the OpenSL ES side goes first so the buffer-queue callback can no longer fire into decoder state that is being torn down:
```c
av_free_packet(packet);
av_free(out_buffer);
av_frame_free(&frame);
swr_free(&swrContext);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
```