Audio decoding follows largely the same flow as video decoding. The only difference is in how the decoded frames are handled: video frames go through pixel-format conversion and are rendered, while audio frames are resampled and played back.
Based on this, when designing the architecture later on, the logic shared by the audio and video paths can be placed in a common base class, with each subclass handling its own concerns such as video rendering or audio playback.
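To make that split concrete, here is a minimal sketch of such a class hierarchy; the names DecoderBase, VideoDecoder, AudioDecoder and OnFrameAvailable are hypothetical and not taken from the actual project:
//A minimal sketch of the shared-base-class idea (hypothetical names, not the project's actual classes)
extern "C" {
#include <libavutil/frame.h>
}

class DecoderBase {
public:
    virtual ~DecoderBase() = default;
protected:
    //The shared demux + decode loop lives in the base class and hands every decoded AVFrame to the subclass
    virtual void OnFrameAvailable(AVFrame *frame) = 0;
};

class VideoDecoder : public DecoderBase {
protected:
    void OnFrameAvailable(AVFrame *frame) override {
        //Video: convert the pixel format (e.g. with libswscale) and render
    }
};

class AudioDecoder : public DecoderBase {
protected:
    void OnFrameAvailable(AVFrame *frame) override {
        //Audio: resample with libswresample and hand the PCM to the audio renderer
    }
};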
Now let's go straight to the implementation code:
//1. Allocate the resample context, set the input/output channel layouts, sample rates and sample formats, then initialize the context
m_SwrContext = swr_alloc();
av_opt_set_int(m_SwrContext, "in_channel_layout", codeCtx->channel_layout, 0);
av_opt_set_int(m_SwrContext, "out_channel_layout", AUDIO_DST_CHANNEL_LAYOUT, 0);
av_opt_set_int(m_SwrContext, "in_sample_rate", codeCtx->sample_rate, 0);
av_opt_set_int(m_SwrContext, "out_sample_rate", AUDIO_DST_SAMPLE_RATE, 0);
av_opt_set_sample_fmt(m_SwrContext, "in_sample_fmt", codeCtx->sample_fmt, 0);
av_opt_set_sample_fmt(m_SwrContext, "out_sample_fmt", DST_SAMPLT_FORMAT, 0);
swr_init(m_SwrContext);
//2. Allocate the output buffer
m_nbSamples = (int)av_rescale_rnd(NB_SAMPLES, AUDIO_DST_SAMPLE_RATE, codeCtx->sample_rate, AV_ROUND_UP);
m_BufferSize = av_samples_get_buffer_size(NULL, AUDIO_DST_CHANNEL_COUNTS, m_nbSamples, DST_SAMPLT_FORMAT, 1);
m_AudioOutBuffer = (uint8_t *) malloc(m_BufferSize);
//3. Resample; frame is the decoded AVFrame
//Note: swr_convert's 3rd argument is the output capacity in samples per channel, so pass m_nbSamples rather than a byte count
int result = swr_convert(m_SwrContext, &m_AudioOutBuffer, m_nbSamples, (const uint8_t **) frame->data, frame->nb_samples);
if (result > 0) {
    //play (see the note below on how to derive the byte count from result)
}
//4. Free resources
if(m_AudioOutBuffer) {
    free(m_AudioOutBuffer);
    m_AudioOutBuffer = nullptr;
}

if(m_SwrContext) {
    swr_free(&m_SwrContext);
    m_SwrContext = nullptr;
}
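One detail worth noting about step 3: the return value of swr_convert is the number of samples actually written per output channel, not a byte count. A minimal sketch of turning it into the number of bytes to play, assuming the destination format is 16-bit stereo PCM (AV_SAMPLE_FMT_S16, matching the OpenSL ES player configuration below):
//Sketch: derive the playable byte count from swr_convert's return value (assumes 16-bit stereo output)
if (result > 0) {
    int bytesPerSample = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16); //2 bytes
    int outDataSize = result * AUDIO_DST_CHANNEL_COUNTS * bytesPerSample;
    //enqueue outDataSize bytes of m_AudioOutBuffer to the audio renderer
}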
Then the resampled data is handed over to OpenSL ES for playback.
OpenSL ES, short for Open Sound Library for Embedded Systems, is an open hardware-accelerated audio library for embedded systems. It supports audio capture and playback, offers a high-performance, low-latency set of audio APIs, and provides cross-platform access to hardware and software audio acceleration, which greatly lowers the effort of developing audio applications in the upper layers.
Object and Interface are the two core concepts in OpenSL ES, and can be thought of as roughly analogous to objects and interfaces in Java. In OpenSL ES, each Object exposes a set of Interfaces, and every object provides a set of basic operations such as Realize, GetState and Destroy.
One important point: you can only use an Object's functionality after obtaining the corresponding Interface via the GetInterface method.
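Putting those two rules together, the typical lifecycle of any OpenSL ES object looks roughly like this (a generic sketch using the engine object, with <SLES/OpenSLES.h> included and error handling omitted):
//Generic OpenSL ES object lifecycle: create -> Realize -> GetInterface -> use -> Destroy
SLObjectItf engineObj = nullptr;
SLEngineItf engineItf = nullptr;
slCreateEngine(&engineObj, 0, nullptr, 0, nullptr, nullptr);      //create the Object
(*engineObj)->Realize(engineObj, SL_BOOLEAN_FALSE);               //allocate its resources synchronously
(*engineObj)->GetInterface(engineObj, SL_IID_ENGINE, &engineItf); //get the Interface before using any functionality
//... use engineItf ...
(*engineObj)->Destroy(engineObj);                                 //release the Object when done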
The example here plays a video file stored on the phone, so only the playback flow of the OpenSL ES Audio Player is covered.
//OpenSL ES renderer initialization
void OpenSLRender::Init() {
    LOGCATE("OpenSLRender::Init");
    int result = -1;
    do {
        //Create and initialize the engine object
        result = CreateEngine();
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::Init CreateEngine fail. result=%d", result);
            break;
        }

        //Create and initialize the output mixer
        result = CreateOutputMixer();
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::Init CreateOutputMixer fail. result=%d", result);
            break;
        }

        //Create and initialize the audio player
        result = CreateAudioPlayer();
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::Init CreateAudioPlayer fail. result=%d", result);
            break;
        }

        //Set the play state
        (*m_AudioPlayerPlay)->SetPlayState(m_AudioPlayerPlay, SL_PLAYSTATE_PLAYING);

        //Activate the callback interface (invoke it once to start feeding the buffer queue)
        AudioPlayerCallback(m_BufferQueue, this);
    } while (false);

    if(result != SL_RESULT_SUCCESS) {
        LOGCATE("OpenSLRender::Init fail. result=%d", result);
        UnInit();
    }
}
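Init falls back to UnInit on failure. That cleanup is not shown here, but a minimal sketch would simply destroy the OpenSL ES objects in the reverse order of creation; destroying an object also invalidates the interfaces obtained from it. The member names follow the class above, and the project's real implementation may differ:
//Hypothetical cleanup sketch: destroy OpenSL ES objects in reverse creation order
void OpenSLRender::UnInit() {
    if (m_AudioPlayerObj) {
        (*m_AudioPlayerObj)->Destroy(m_AudioPlayerObj);
        m_AudioPlayerObj = nullptr;
        m_AudioPlayerPlay = nullptr;
        m_BufferQueue = nullptr;
        m_AudioPlayerVolume = nullptr;
    }
    if (m_OutputMixObj) {
        (*m_OutputMixObj)->Destroy(m_OutputMixObj);
        m_OutputMixObj = nullptr;
    }
    if (m_EngineObj) {
        (*m_EngineObj)->Destroy(m_EngineObj);
        m_EngineObj = nullptr;
        m_EngineEngine = nullptr;
    }
}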
int OpenSLRender::CreateEngine() {
    SLresult result = SL_RESULT_SUCCESS;
    do {
        result = slCreateEngine(&m_EngineObj, 0, nullptr, 0, nullptr, nullptr);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateEngine slCreateEngine fail. result=%d", result);
            break;
        }

        result = (*m_EngineObj)->Realize(m_EngineObj, SL_BOOLEAN_FALSE);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateEngine Realize fail. result=%d", result);
            break;
        }

        result = (*m_EngineObj)->GetInterface(m_EngineObj, SL_IID_ENGINE, &m_EngineEngine);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateEngine GetInterface fail. result=%d", result);
            break;
        }
    } while (false);

    return result;
}
int OpenSLRender::CreateOutputMixer() {
    SLresult result = SL_RESULT_SUCCESS;
    do {
        const SLInterfaceID mids[1] = {SL_IID_ENVIRONMENTALREVERB};
        const SLboolean mreq[1] = {SL_BOOLEAN_FALSE};
        result = (*m_EngineEngine)->CreateOutputMix(m_EngineEngine, &m_OutputMixObj, 1, mids, mreq);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateOutputMixer CreateOutputMix fail. result=%d", result);
            break;
        }

        result = (*m_OutputMixObj)->Realize(m_OutputMixObj, SL_BOOLEAN_FALSE);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateOutputMixer Realize fail. result=%d", result);
            break;
        }
    } while (false);

    return result;
}
int OpenSLRender::CreateAudioPlayer() {
    SLDataLocator_AndroidSimpleBufferQueue android_queue = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM pcm = {
            SL_DATAFORMAT_PCM,           //format type
            (SLuint32)2,                 //channel count
            SL_SAMPLINGRATE_44_1,        //44100hz
            SL_PCMSAMPLEFORMAT_FIXED_16, //bits per sample
            SL_PCMSAMPLEFORMAT_FIXED_16, //container size
            SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, //channel mask
            SL_BYTEORDER_LITTLEENDIAN    //endianness
    };

    SLDataSource slDataSource = {&android_queue, &pcm};

    SLDataLocator_OutputMix outputMix = {SL_DATALOCATOR_OUTPUTMIX, m_OutputMixObj};
    SLDataSink slDataSink = {&outputMix, nullptr};

    const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND, SL_IID_VOLUME};
    const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE};

    SLresult result;
    do {
        result = (*m_EngineEngine)->CreateAudioPlayer(m_EngineEngine, &m_AudioPlayerObj, &slDataSource, &slDataSink, 3, ids, req);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer CreateAudioPlayer fail. result=%d", result);
            break;
        }

        result = (*m_AudioPlayerObj)->Realize(m_AudioPlayerObj, SL_BOOLEAN_FALSE);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer Realize fail. result=%d", result);
            break;
        }

        result = (*m_AudioPlayerObj)->GetInterface(m_AudioPlayerObj, SL_IID_PLAY, &m_AudioPlayerPlay);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer GetInterface fail. result=%d", result);
            break;
        }

        result = (*m_AudioPlayerObj)->GetInterface(m_AudioPlayerObj, SL_IID_BUFFERQUEUE, &m_BufferQueue);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer GetInterface fail. result=%d", result);
            break;
        }

        result = (*m_BufferQueue)->RegisterCallback(m_BufferQueue, AudioPlayerCallback, this);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer RegisterCallback fail. result=%d", result);
            break;
        }

        result = (*m_AudioPlayerObj)->GetInterface(m_AudioPlayerObj, SL_IID_VOLUME, &m_AudioPlayerVolume);
        if(result != SL_RESULT_SUCCESS)
        {
            LOGCATE("OpenSLRender::CreateAudioPlayer GetInterface fail. result=%d", result);
            break;
        }
    } while (false);

    return result;
}
//Audio player callback
void OpenSLRender::AudioPlayerCallback(SLAndroidSimpleBufferQueueItf bufferQueue, void *context) {
    OpenSLRender *openSlRender = static_cast<OpenSLRender *>(context);
    openSlRender->HandleAudioFrameQueue();
}

void OpenSLRender::HandleAudioFrameQueue() {
    LOGCATE("OpenSLRender::HandleAudioFrameQueue QueueSize=%d", m_AudioFrameQueue.size());
    if (m_AudioPlayerPlay == nullptr) return;
    if (m_AudioFrameQueue.empty()) return; //avoid calling front() on an empty queue

    //Play the data stored in the audio frame queue
    AudioFrame *audioFrame = m_AudioFrameQueue.front();
    if (nullptr != audioFrame && m_AudioPlayerPlay) {
        SLresult result = (*m_BufferQueue)->Enqueue(m_BufferQueue, audioFrame->data, (SLuint32) audioFrame->dataSize);
        if (result == SL_RESULT_SUCCESS) {
            m_AudioFrameQueue.pop();
            delete audioFrame;
        }
    }
}
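On the producer side, the decoder thread has to wrap each chunk of resampled PCM into an AudioFrame and push it into m_AudioFrameQueue before the callback can consume it. A rough sketch of that hand-off, assuming AudioFrame simply owns a copy of the PCM bytes and that queue access is synchronized elsewhere (the project's real AudioFrame, queue type and method names may differ):
//Hypothetical producer-side hand-off; <cstdlib> and <cstring> assumed to be included
struct AudioFrame {
    uint8_t *data;
    int      dataSize;
    AudioFrame(uint8_t *src, int size) : dataSize(size) {
        data = (uint8_t *) malloc(size);
        memcpy(data, src, size);
    }
    ~AudioFrame() { free(data); }
};

void OpenSLRender::RenderAudioFrame(uint8_t *pcmData, int dataSize) {
    if (pcmData == nullptr || dataSize <= 0) return;
    //copy the PCM so the decoder can immediately reuse its own buffer
    m_AudioFrameQueue.push(new AudioFrame(pcmData, dataSize));
}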
That is the overall flow of audio playback. There is also an audio capture (recording) side, which will be introduced in a later article. Audio decoding is mostly identical to the video decoding flow; as long as you are familiar with the handful of OpenSL ES APIs and the steps above, getting playback working is straightforward.