接上文。。。
前面我们看到,音频流 Track 的 hook
将当前流的音频数据混音进输出数据中,hook
的实际函数由 TrackBase::getTrackHook()
根据是否需要重采样,输入输出的采样格式和通道数等参数决定,具体策略如下列代码所示:
// Selects the per-track mixing hook for this track.
// Dispatch keys: trackType (TRACKTYPE_*), the track's channel count, and the
// mixer input sample format. mixerOutFormat is accepted but unused here.
AudioMixerBase::hook_t AudioMixerBase::TrackBase::getTrackHook(int trackType, uint32_t channelCount,
audio_format_t mixerInFormat, audio_format_t mixerOutFormat __unused)
{
// Legacy fast path: only when the new mixer is not forced, output is stereo
// (FCC_2) and input samples are 16-bit PCM. Uses the hand-written hooks.
if (!kUseNewMixer && channelCount == FCC_2 && mixerInFormat == AUDIO_FORMAT_PCM_16_BIT) {
switch (trackType) {
case TRACKTYPE_NOP:
return &TrackBase::track__nop;
case TRACKTYPE_RESAMPLE:
return &TrackBase::track__genericResample;
case TRACKTYPE_NORESAMPLEMONO:
return &TrackBase::track__16BitsMono;
case TRACKTYPE_NORESAMPLE:
return &TrackBase::track__16BitsStereo;
default:
LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
break;
}
}
// Generic path: templated hooks parameterized by MIXTYPE and sample types
// (float or int16_t input; float or Q4.27 int32_t output per the template docs).
LOG_ALWAYS_FATAL_IF(channelCount > MAX_NUM_CHANNELS);
switch (trackType) {
case TRACKTYPE_NOP:
return &TrackBase::track__nop;
case TRACKTYPE_RESAMPLE:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
}
break;
case TRACKTYPE_RESAMPLESTEREO:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
MIXTYPE_MULTI_STEREOVOL, float /*TO*/, float /*TI*/,
TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
MIXTYPE_MULTI_STEREOVOL, int32_t /*TO*/, int16_t /*TI*/,
TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
}
break;
// RESAMPLEMONO needs MIXTYPE_STEREOEXPAND since resampler will upmix mono
// track to stereo track
case TRACKTYPE_RESAMPLEMONO:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
MIXTYPE_STEREOEXPAND, float /*TO*/, float /*TI*/,
TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
return (AudioMixerBase::hook_t) &TrackBase::track__Resample<
MIXTYPE_STEREOEXPAND, int32_t /*TO*/, int16_t /*TI*/,
TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
}
break;
// Mono input without resampling: expand the single channel to all outputs.
case TRACKTYPE_NORESAMPLEMONO:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
MIXTYPE_MONOEXPAND, float /*TO*/, float /*TI*/, TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
MIXTYPE_MONOEXPAND, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
}
break;
case TRACKTYPE_NORESAMPLE:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
MIXTYPE_MULTI, float /*TO*/, float /*TI*/, TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
MIXTYPE_MULTI, int32_t /*TO*/, int16_t /*TI*/, TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
}
break;
case TRACKTYPE_NORESAMPLESTEREO:
switch (mixerInFormat) {
case AUDIO_FORMAT_PCM_FLOAT:
return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
MIXTYPE_MULTI_STEREOVOL, float /*TO*/, float /*TI*/,
TYPE_AUX>;
case AUDIO_FORMAT_PCM_16_BIT:
return (AudioMixerBase::hook_t) &TrackBase::track__NoResample<
MIXTYPE_MULTI_STEREOVOL, int32_t /*TO*/, int16_t /*TI*/,
TYPE_AUX>;
default:
LOG_ALWAYS_FATAL("bad mixerInFormat: %#x", mixerInFormat);
break;
}
break;
default:
LOG_ALWAYS_FATAL("bad trackType: %d", trackType);
break;
}
// Only reachable after a LOG_ALWAYS_FATAL above; keeps the compiler happy.
return NULL;
}
当混音器的音频源不需要混音时,音频源 Track 的 hook
为 TrackBase::track__nop()
,它什么都不做,该函数定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
) 如下:
// No-op hook: selected (TRACKTYPE_NOP) when the track contributes nothing
// to the mix; all parameters are intentionally unused.
void AudioMixerBase::TrackBase::track__nop(int32_t* out __unused,
size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
{
}
当混音器的音频源 Track 需要重采样之后再混音,且音频源 Track 的输出采样格式为 AUDIO_FORMAT_PCM_16_BIT
,混音器输出通道数为双通道立体声时,音频源 Track 的 hook
为 TrackBase::track__genericResample()
,该函数定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
) 如下:
// Legacy hook: resample this track and mix it into `out` (stereo, 16-bit
// input path). If a volume ramp or aux send is active, resampling is done at
// unity gain into `temp` and the gain is applied in a second pass; otherwise
// the resampler itself applies the constant gain and mixes directly into out.
void AudioMixerBase::TrackBase::track__genericResample(
int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
{
ALOGVV("track__genericResample\n");
mResampler->setSampleRate(sampleRate);
// ramp gain - resample to temp buffer and scale/mix in 2nd step
if (aux != NULL) {
// always resample with unity gain when sending to auxiliary buffer to be able
// to apply send level after resampling
mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
mResampler->resample(temp, outFrameCount, bufferProvider);
// non-zero increments mean a ramp is in progress
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
volumeRampStereo(out, outFrameCount, temp, aux);
} else {
volumeStereo(out, outFrameCount, temp, aux);
}
} else {
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
memset(temp, 0, outFrameCount * MAX_NUM_CHANNELS * sizeof(int32_t));
mResampler->resample(temp, outFrameCount, bufferProvider);
volumeRampStereo(out, outFrameCount, temp, aux);
}
// constant gain
else {
// no ramp, no aux: the resampler applies the gain and mixes in one pass
mResampler->setVolume(mVolume[0], mVolume[1]);
mResampler->resample(out, outFrameCount, bufferProvider);
}
}
}
// No-op hook (duplicate quotation in the article): does nothing when the
// track has no data to contribute to the mix.
void AudioMixerBase::TrackBase::track__nop(int32_t* out __unused,
size_t outFrameCount __unused, int32_t* temp __unused, int32_t* aux __unused)
{
}
// Mix the stereo samples in `temp` into `out` while linearly ramping the
// left/right volumes (and the aux send level, if aux != NULL) across
// `frameCount` frames. Volumes are fixed-point: the top bits (>> 16, or
// >> 17 for the averaged aux send) are used as the per-sample multiplier;
// temp samples are shifted right by 12 before the multiply.
void AudioMixerBase::TrackBase::volumeRampStereo(
int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
{
int32_t vl = prevVolume[0];
int32_t vr = prevVolume[1];
const int32_t vlInc = volumeInc[0];
const int32_t vrInc = volumeInc[1];
//ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
// t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
// ramp volume
if (CC_UNLIKELY(aux != NULL)) {
int32_t va = prevAuxLevel;
const int32_t vaInc = auxInc;
int32_t l;
int32_t r;
do {
l = (*temp++ >> 12);
r = (*temp++ >> 12);
*out++ += (vl >> 16) * l;
*out++ += (vr >> 16) * r;
// aux gets the L/R average: >> 17 instead of >> 16 halves (l + r)
*aux++ += (va >> 17) * (l + r);
vl += vlInc;
vr += vrInc;
va += vaInc;
} while (--frameCount);
prevAuxLevel = va;
} else {
do {
*out++ += (vl >> 16) * (*temp++ >> 12);
*out++ += (vr >> 16) * (*temp++ >> 12);
vl += vlInc;
vr += vrInc;
} while (--frameCount);
}
// persist ramp progress so the next buffer continues where this one ended
prevVolume[0] = vl;
prevVolume[1] = vr;
adjustVolumeRamp(aux != NULL);
}
// Mix the stereo samples in `temp` into `out` at constant (non-ramping)
// left/right gains, optionally accumulating the L/R average into `aux`
// at the constant aux send level. Uses mulAdd for the multiply-accumulate.
void AudioMixerBase::TrackBase::volumeStereo(
int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
{
const int16_t vl = volume[0];
const int16_t vr = volume[1];
if (CC_UNLIKELY(aux != NULL)) {
const int16_t va = auxLevel;
do {
int16_t l = (int16_t)(*temp++ >> 12);
int16_t r = (int16_t)(*temp++ >> 12);
out[0] = mulAdd(l, vl, out[0]);
// aux sample is the average of the two channels
int16_t a = (int16_t)(((int32_t)l + r) >> 1);
out[1] = mulAdd(r, vr, out[1]);
out += 2;
aux[0] = mulAdd(a, va, aux[0]);
aux++;
} while (--frameCount);
} else {
do {
int16_t l = (int16_t)(*temp++ >> 12);
int16_t r = (int16_t)(*temp++ >> 12);
out[0] = mulAdd(l, vl, out[0]);
out[1] = mulAdd(r, vr, out[1]);
out += 2;
} while (--frameCount);
}
}
TrackBase::track__genericResample()
函数的基本处理流程大概是这样的:
- 对音频源 Track 的音频数据做重采样;
- 处理音量的同时做混音,如果有 Aux 数据,也同时处理,这里的处理音量有两个含义,即如果需要做音量斜坡的话则做音量斜坡,否则应用音量增益。
Android 重采样器支持重采样的同时应用音量增益,并把处理之后的音频数据混音进输出数据中。因此,如果不需要对音频源 Track 做音量斜坡且没有 Aux 数据,则可以直接由重采样器完成重采样、应用音量增益和混音。TrackBase::track__genericResample()
函数的实现看上去有些重复代码,如果以更加贴合其整体执行流程的写法来写,可以写为类似下面这样:
// Article's restructured variant of track__genericResample(): first decide
// the resample destination (out vs. temp) and resampler gain (track volume
// vs. unity), then run exactly one resample, then apply ramp/aux processing
// only when required. Behavior matches the original implementation.
void AudioMixerBase::TrackBase::track__genericResample(
int32_t* out, size_t outFrameCount, int32_t* temp, int32_t* aux)
{
ALOGVV("track__genericResample\n");
mResampler->setSampleRate(sampleRate);
float left = mVolume[0];
float right = mVolume[1];
int32_t* out_inter = out;
// A ramp or an aux send forces a unity-gain resample into temp so that
// gain (and the aux level) can be applied in a second pass.
if (aux != NULL || CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
left = UNITY_GAIN_FLOAT;
right = UNITY_GAIN_FLOAT;
memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(int32_t));
out_inter = temp;
}
mResampler->setVolume(left, right);
mResampler->resample(out_inter, outFrameCount, bufferProvider);
// Second pass: ramp if any increment is active, else constant-gain mix
// (only needed when an aux buffer exists; otherwise the resampler already
// mixed directly into out with the track volume).
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]) || (aux != NULL && auxInc)) {
volumeRampStereo(out, outFrameCount, out_inter, aux);
} else if (aux != NULL) {
volumeStereo(out, outFrameCount, out_inter, aux);
}
}
TrackBase::track__genericResample()
函数调用 volumeRampStereo()
和 volumeStereo()
来混音重采样之后的数据,前者在需要执行音量斜坡时调用,后者则在不需要时调用。AUX 数据由音频左右声道的数据生成。这两个函数同样可以以更简洁的方式来写,如下面这样:
// Article's restructured volumeRampStereo(): the aux/no-aux duplication is
// folded into one loop with a per-frame aux guard. The branch is
// loop-invariant, so behavior matches the original two-loop version.
void AudioMixerBase::TrackBase::volumeRampStereo(
int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
{
int32_t vl = prevVolume[0];
int32_t vr = prevVolume[1];
const int32_t vlInc = volumeInc[0];
const int32_t vrInc = volumeInc[1];
//ALOGD("[0] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
// t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
// ramp volume
int32_t va = prevAuxLevel;
const int32_t vaInc = auxInc;
int32_t l;
int32_t r;
do {
l = (*temp++ >> 12);
r = (*temp++ >> 12);
*out++ += (vl >> 16) * l;
*out++ += (vr >> 16) * r;
vl += vlInc;
vr += vrInc;
if (CC_UNLIKELY(aux != NULL)) {
// >> 17 (not >> 16) halves (l + r): aux gets the channel average
*aux++ += (va >> 17) * (l + r);
va += vaInc;
}
} while (--frameCount);
// when aux == NULL, va was never advanced, so this write is a no-op
prevAuxLevel = va;
prevVolume[0] = vl;
prevVolume[1] = vr;
adjustVolumeRamp(aux != NULL);
}
// Article's restructured volumeStereo(): constant-gain stereo mix with the
// aux handling moved into a per-frame guard inside a single loop.
void AudioMixerBase::TrackBase::volumeStereo(
int32_t* out, size_t frameCount, int32_t* temp, int32_t* aux)
{
const int16_t vl = volume[0];
const int16_t vr = volume[1];
const int16_t va = auxLevel;
do {
int16_t l = (int16_t)(*temp++ >> 12);
int16_t r = (int16_t)(*temp++ >> 12);
out[0] = mulAdd(l, vl, out[0]);
out[1] = mulAdd(r, vr, out[1]);
out += 2;
if (CC_UNLIKELY(aux != NULL)) {
// aux sample is the average of the two channels
int16_t a = (int16_t)(((int32_t)l + r) >> 1);
aux[0] = mulAdd(a, va, aux[0]);
aux++;
}
} while (--frameCount);
}
当混音器的音频源 Track 不需要重采样即可混音,且音频源 Track 的输出采样格式为 AUDIO_FORMAT_PCM_16_BIT
,音频源 Track 的输出通道数为单声道,混音器输出通道数为双通道立体声时,音频源 Track 的 hook
为 TrackBase::track__16BitsMono()
,该函数定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
) 如下:
// Legacy hook: mix a mono 16-bit track (no resampling) into stereo `out`,
// duplicating the single input sample into both output channels. Four paths:
// {aux, no-aux} x {ramping, constant gain}. Consumes input from mIn and
// stores the advanced pointer back at the end.
void AudioMixerBase::TrackBase::track__16BitsMono(
int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
{
ALOGVV("track__16BitsMono\n");
const int16_t *in = static_cast<int16_t const *>(mIn);
if (CC_UNLIKELY(aux != NULL)) {
// ramp gain
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
int32_t vl = prevVolume[0];
int32_t vr = prevVolume[1];
int32_t va = prevAuxLevel;
const int32_t vlInc = volumeInc[0];
const int32_t vrInc = volumeInc[1];
const int32_t vaInc = auxInc;
// ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
// t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
int32_t l = *in++;
// mono sample duplicated to left and right
*out++ += (vl >> 16) * l;
*out++ += (vr >> 16) * l;
*aux++ += (va >> 16) * l;
vl += vlInc;
vr += vrInc;
va += vaInc;
} while (--frameCount);
prevVolume[0] = vl;
prevVolume[1] = vr;
prevAuxLevel = va;
adjustVolumeRamp(true);
}
// constant gain
else {
const int16_t vl = volume[0];
const int16_t vr = volume[1];
const int16_t va = (int16_t)auxLevel;
do {
int16_t l = *in++;
out[0] = mulAdd(l, vl, out[0]);
out[1] = mulAdd(l, vr, out[1]);
out += 2;
aux[0] = mulAdd(l, va, aux[0]);
aux++;
} while (--frameCount);
}
} else {
// ramp gain
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
int32_t vl = prevVolume[0];
int32_t vr = prevVolume[1];
const int32_t vlInc = volumeInc[0];
const int32_t vrInc = volumeInc[1];
// ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
// t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
int32_t l = *in++;
*out++ += (vl >> 16) * l;
*out++ += (vr >> 16) * l;
vl += vlInc;
vr += vrInc;
} while (--frameCount);
prevVolume[0] = vl;
prevVolume[1] = vr;
adjustVolumeRamp(false);
}
// constant gain
else {
const int16_t vl = volume[0];
const int16_t vr = volume[1];
do {
int16_t l = *in++;
out[0] = mulAdd(l, vl, out[0]);
out[1] = mulAdd(l, vr, out[1]);
out += 2;
} while (--frameCount);
}
}
// remember how far we consumed for the next call
mIn = in;
}
这个函数主要分四种情况来处理:
- 需要生成 Aux 数据,需要执行音量斜坡处理;
- 需要生成 Aux 数据,不需要执行音量斜坡处理;
- 不需要生成 Aux 数据,需要执行音量斜坡处理;
- 不需要生成 Aux 数据,不需要执行音量斜坡处理。
同样这个函数也可以以更简洁更清晰的方式来写,如下面这样:
void AudioMixerBase::TrackBase::track__16BitsMono(
int32_t* out, size_t frameCount, int32_t* temp , int32_t* aux)
{
ALOGVV("track__16BitsMono\n");
const int16_t *in = static_cast<int16_t const *>(mIn);
// ramp gain
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]) || (CC_UNLIKELY(aux != NULL && auxInc))) {
int32_t vl = prevVolume[0];
int32_t vr = prevVolume[1];
int32_t va = prevAuxLevel;
const int32_t vlInc = volumeInc[0];
const int32_t vrInc = volumeInc[1];
const int32_t vaInc = auxInc;
// ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
// t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
int32_t l = *in++;
*out++ += (vl >> 16) * l;
*out++ += (vr >> 16) * l;
vl += vlInc;
vr += vrInc;
if (aux != NULL) {
*aux++ += (va >> 16) * l;
va += vaInc;
}
} while (--frameCount);
prevVolume[0] = vl;
prevVolume[1] = vr;
prevAuxLevel = va;
adjustVolumeRamp(true);
}
// constant gain
else {
const int16_t vl = volume[0];
const int16_t vr = volume[1];
const int16_t va = (int16_t)auxLevel;
do {
int16_t l = *in++;
out[0] = mulAdd(l, vl, out[0]);
out[1] = mulAdd(l, vr, out[1]);
out += 2;
if (aux != NULL) {
aux[0] = mulAdd(l, va, aux[0]);
aux++;
}
} while (--frameCount);
}
mIn = in;
}
即以是否需要执行音量斜坡为主要的逻辑划分维度,以是否需要生成 AUX 数据为次要的逻辑划分维度。
当混音器的音频源 Track 不需要重采样即可混音,且音频源 Track 的输出采样格式为 AUDIO_FORMAT_PCM_16_BIT
,音频源 Track 的输出通道数为双声道立体声,混音器输出通道数为双通道立体声时,音频源 Track 的 hook
为 TrackBase::track__16BitsStereo()
,该函数定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
) 如下:
// Legacy hook: mix a stereo 16-bit track (no resampling) into stereo `out`.
// Four paths: {aux, no-aux} x {ramping, constant gain}. On the constant-gain
// paths, both channels are loaded as one packed uint32_t and mixed with
// mulAddRL (a platform-optimizable paired multiply-accumulate).
void AudioMixerBase::TrackBase::track__16BitsStereo(
int32_t* out, size_t frameCount, int32_t* temp __unused, int32_t* aux)
{
ALOGVV("track__16BitsStereo\n");
const int16_t *in = static_cast<const int16_t *>(mIn);
if (CC_UNLIKELY(aux != NULL)) {
int32_t l;
int32_t r;
// ramp gain
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]|auxInc)) {
int32_t vl = prevVolume[0];
int32_t vr = prevVolume[1];
int32_t va = prevAuxLevel;
const int32_t vlInc = volumeInc[0];
const int32_t vrInc = volumeInc[1];
const int32_t vaInc = auxInc;
// ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
// t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
l = (int32_t)*in++;
r = (int32_t)*in++;
*out++ += (vl >> 16) * l;
*out++ += (vr >> 16) * r;
// aux gets the L/R average: >> 17 instead of >> 16 halves (l + r)
*aux++ += (va >> 17) * (l + r);
vl += vlInc;
vr += vrInc;
va += vaInc;
} while (--frameCount);
prevVolume[0] = vl;
prevVolume[1] = vr;
prevAuxLevel = va;
adjustVolumeRamp(true);
}
// constant gain
else {
const uint32_t vrl = volumeRL;
const int16_t va = (int16_t)auxLevel;
do {
// load L and R as one packed 32-bit word for mulAddRL
uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
int16_t a = (int16_t)(((int32_t)in[0] + in[1]) >> 1);
in += 2;
out[0] = mulAddRL(1, rl, vrl, out[0]);
out[1] = mulAddRL(0, rl, vrl, out[1]);
out += 2;
aux[0] = mulAdd(a, va, aux[0]);
aux++;
} while (--frameCount);
}
} else {
// ramp gain
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1])) {
int32_t vl = prevVolume[0];
int32_t vr = prevVolume[1];
const int32_t vlInc = volumeInc[0];
const int32_t vrInc = volumeInc[1];
// ALOGD("[1] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
// t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
*out++ += (vl >> 16) * (int32_t) *in++;
*out++ += (vr >> 16) * (int32_t) *in++;
vl += vlInc;
vr += vrInc;
} while (--frameCount);
prevVolume[0] = vl;
prevVolume[1] = vr;
adjustVolumeRamp(false);
}
// constant gain
else {
const uint32_t vrl = volumeRL;
do {
uint32_t rl = *reinterpret_cast<const uint32_t *>(in);
in += 2;
out[0] = mulAddRL(1, rl, vrl, out[0]);
out[1] = mulAddRL(0, rl, vrl, out[1]);
out += 2;
} while (--frameCount);
}
}
// remember how far we consumed for the next call
mIn = in;
}
TrackBase::track__16BitsStereo()
函数与上面的 TrackBase::track__16BitsMono()
函数大体相同,但两者有如下两点差别:
- 输出音频数据中两个通道数据的来源不同,对于前者,输出音频数据中两个通道的数据分别来自于输入数据中不同通道的数据,后者则是复制输入数据中单个通道的数据扩展成为输出数据中的双通道数据;
- 前者在不需要执行音量斜坡操作时,使用了某些平台上可以借助机器的指令来优化性能的
mulAddRL()
操作,不过,这个操作在需要执行音量斜坡操作的代码路径中其实同样适用,只是原实现没有在那里使用它。
看上去,将 TrackBase::track__16BitsStereo()
和 TrackBase::track__16BitsMono()
函数合并成一个函数也并非不可以,如下面这样,不使用 mulAddRL()
操作的版本:
template<bool stereo>
void AudioMixerBase::TrackBase::track__16Bits(
int32_t* out, size_t frameCount, int32_t* temp , int32_t* aux)
{
ALOGVV("track__16BitsMono\n");
const int16_t *in = static_cast<int16_t const *>(mIn);
// ramp gain
if (CC_UNLIKELY(volumeInc[0]|volumeInc[1]) || (CC_UNLIKELY(aux != NULL && auxInc))) {
int32_t l;
int32_t r;
int32_t vl = prevVolume[0];
int32_t vr = prevVolume[1];
int32_t va = prevAuxLevel;
const int32_t vlInc = volumeInc[0];
const int32_t vrInc = volumeInc[1];
const int32_t vaInc = auxInc;
// ALOGD("[2] %p: inc=%f, v0=%f, v1=%d, final=%f, count=%d",
// t, vlInc/65536.0f, vl/65536.0f, volume[0],
// (vl + vlInc*frameCount)/65536.0f, frameCount);
do {
l = *in++;
if (stereo) {
r = *in++;
} else {
r = l;
}
*out++ += (vl >> 16) * l;
*out++ += (vr >> 16) * r;
vl += vlInc;
vr += vrInc;
if (aux != NULL) {
if (stereo) {
*aux++ += (va >> 17) * (l + r);;
} else {
*aux++ += (va >> 16) * l;
}
va += vaInc;
}
} while (--frameCount);
prevVolume[0] = vl;
prevVolume[1] = vr;
prevAuxLevel = va;
adjustVolumeRamp(true);
}
// constant gain
else {
const int16_t vl = volume[0];
const int16_t vr = volume[1];
const int16_t va = (int16_t)auxLevel;
int16_t l;
int16_t r;
do {
l = *in++;
if (stereo) {
r = *in++;
} else {
r = l;
}
out[0] = mulAdd(l, vl, out[0]);
out[1] = mulAdd(r, vr, out[1]);
out += 2;
if (aux != NULL) {
if (stereo) {
aux[0] = mulAdd((l + r) >> 1, va, aux[0]);
} else {
aux[0] = mulAdd(l, va, aux[0]);
}
aux++;
}
} while (--frameCount);
}
mIn = in;
}
上面的各种音频源 Track 的混音操作函数,在不强制使用新混音器时可用。当强制使用新混音器,或混音器及音频源 Track 的配置参数不同于上面所列时,主要根据是否需要混音,分别使用模板函数 AudioMixerBase::TrackBase::track__Resample()
和 AudioMixerBase::TrackBase::track__NoResample()
,这两个函数的定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
) 如下:
/* This track hook is called to do resampling then mixing,
 * pulling from the track's upstream AudioBufferProvider.
 *
 * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
 * TO: int32_t (Q4.27) or float
 * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
 * TA: int32_t (Q4.27) or float
 *
 * Generic replacement for track__genericResample(): ramps, aux sends and
 * the channel-expanding MIXTYPEs go through a unity-gain resample into temp
 * followed by volumeMix(); otherwise the resampler applies the constant
 * gain and mixes directly into out.
 */
template <int MIXTYPE, typename TO, typename TI, typename TA>
void AudioMixerBase::TrackBase::track__Resample(TO* out, size_t outFrameCount, TO* temp, TA* aux)
{
ALOGVV("track__Resample\n");
mResampler->setSampleRate(sampleRate);
const bool ramp = needsRamp();
if (MIXTYPE == MIXTYPE_MONOEXPAND || MIXTYPE == MIXTYPE_STEREOEXPAND // custom volume handling
|| ramp || aux != NULL) {
// if ramp: resample with unity gain to temp buffer and scale/mix in 2nd step.
// if aux != NULL: resample with unity gain to temp buffer then apply send level.
mResampler->setVolume(UNITY_GAIN_FLOAT, UNITY_GAIN_FLOAT);
memset(temp, 0, outFrameCount * mMixerChannelCount * sizeof(TO));
mResampler->resample((int32_t*)temp, outFrameCount, bufferProvider);
// float volume is used when the input samples are float (TI == float)
volumeMix<MIXTYPE, std::is_same_v<TI, float> /* USEFLOATVOL */, true /* ADJUSTVOL */>(
out, outFrameCount, temp, aux, ramp);
} else { // constant volume gain
mResampler->setVolume(mVolume[0], mVolume[1]);
mResampler->resample((int32_t*)out, outFrameCount, bufferProvider);
}
}
/* This track hook is called to mix a track, when no resampling is required.
 * The input buffer should be present in in.
 *
 * MIXTYPE (see AudioMixerOps.h MIXTYPE_* enumeration)
 * TO: int32_t (Q4.27) or float
 * TI: int32_t (Q4.27) or int16_t (Q0.15) or float
 * TA: int32_t (Q4.27) or float
 *
 * Delegates all volume/ramp/aux handling to volumeMix(), then advances mIn
 * by the number of input samples consumed.
 */
template <int MIXTYPE, typename TO, typename TI, typename TA>
void AudioMixerBase::TrackBase::track__NoResample(
TO* out, size_t frameCount, TO* temp __unused, TA* aux)
{
ALOGVV("track__NoResample\n");
const TI *in = static_cast<const TI *>(mIn);
volumeMix<MIXTYPE, std::is_same_v<TI, float> /* USEFLOATVOL */, true /* ADJUSTVOL */>(
out, frameCount, in, aux, needsRamp());
// MIXTYPE_MONOEXPAND reads a single input channel and expands to NCHAN output channels.
// MIXTYPE_MULTI reads NCHAN input channels and places to NCHAN output channels.
in += (MIXTYPE == MIXTYPE_MONOEXPAND) ? frameCount : frameCount * mMixerChannelCount;
mIn = in;
}
AudioMixerBase::TrackBase::track__Resample()
函数与 AudioMixerBase::TrackBase::track__genericResample()
有几分相似,但前者是更通用的实现。
修改或设置混音器及其音频源参数
AudioMixerBase
和 AudioMixer
都提供了 setParameter()
函数,用来修改或设置混音器及其音频源参数,AudioMixer::setParameter()
函数定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixer.cpp
) 如下:
// Set or change a parameter for track `name`. `target` selects the parameter
// group (TRACK / RESAMPLE / RAMP_VOLUME / VOLUME / TIMESTRETCH) and `param`
// the specific parameter; `value` is reinterpreted per-parameter (int,
// int32_t*, float*, or AudioPlaybackRate*). Unknown targets/params are fatal.
// Changes that affect the processing pipeline call invalidate() to force the
// mixer to re-derive its process/track hooks.
void AudioMixer::setParameter(int name, int target, int param, void *value)
{
LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
const std::shared_ptr<Track> &track = getTrack(name);
// value is a type-punned argument: decode it both as an int and a buffer ptr
int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
switch (target) {
case TRACK:
switch (param) {
case CHANNEL_MASK: {
const audio_channel_mask_t trackChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
// haptic channels are carried alongside the mixer channel mask
if (setChannelMasks(name, trackChannelMask,
static_cast<audio_channel_mask_t>(
track->mMixerChannelMask | track->mMixerHapticChannelMask))) {
ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
invalidate();
}
} break;
case MAIN_BUFFER:
if (track->mainBuffer != valueBuf) {
track->mainBuffer = valueBuf;
ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
if (track->mKeepContractedChannels) {
track->prepareForAdjustChannels(mFrameCount);
}
invalidate();
}
break;
case AUX_BUFFER:
// aux buffer handling lives in the base class
AudioMixerBase::setParameter(name, target, param, value);
break;
case FORMAT: {
audio_format_t format = static_cast<audio_format_t>(valueInt);
if (track->mFormat != format) {
ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
track->mFormat = format;
ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
// a new source format requires rebuilding the reformat stage
track->prepareForReformat();
invalidate();
}
} break;
// FIXME do we want to support setting the downmix type from AudioFlinger?
// for a specific track? or per mixer?
/* case DOWNMIX_TYPE:
break */
case MIXER_FORMAT: {
audio_format_t format = static_cast<audio_format_t>(valueInt);
if (track->mMixerFormat != format) {
track->mMixerFormat = format;
ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
if (track->mKeepContractedChannels) {
track->prepareForAdjustChannels(mFrameCount);
}
}
} break;
case MIXER_CHANNEL_MASK: {
const audio_channel_mask_t mixerChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
if (setChannelMasks(name, static_cast<audio_channel_mask_t>(
track->channelMask | track->mHapticChannelMask),
mixerChannelMask)) {
ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
invalidate();
}
} break;
case HAPTIC_ENABLED: {
const bool hapticPlaybackEnabled = static_cast<bool>(valueInt);
if (track->mHapticPlaybackEnabled != hapticPlaybackEnabled) {
track->mHapticPlaybackEnabled = hapticPlaybackEnabled;
track->mKeepContractedChannels = hapticPlaybackEnabled;
track->prepareForAdjustChannels(mFrameCount);
}
} break;
case HAPTIC_INTENSITY: {
const os::HapticScale hapticIntensity = static_cast<os::HapticScale>(valueInt);
if (track->mHapticIntensity != hapticIntensity) {
track->mHapticIntensity = hapticIntensity;
}
} break;
case HAPTIC_MAX_AMPLITUDE: {
const float hapticMaxAmplitude = *reinterpret_cast<float*>(value);
if (track->mHapticMaxAmplitude != hapticMaxAmplitude) {
track->mHapticMaxAmplitude = hapticMaxAmplitude;
}
} break;
default:
LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
}
break;
case RESAMPLE:
case RAMP_VOLUME:
case VOLUME:
// resample and volume parameters are handled entirely by the base class
AudioMixerBase::setParameter(name, target, param, value);
break;
case TIMESTRETCH:
switch (param) {
case PLAYBACK_RATE: {
const AudioPlaybackRate *playbackRate =
reinterpret_cast<AudioPlaybackRate*>(value);
// invalid rates are only warned about, not rejected
ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
"bad parameters speed %f, pitch %f",
playbackRate->mSpeed, playbackRate->mPitch);
if (track->setPlaybackRate(*playbackRate)) {
ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
"%f %f %d %d",
playbackRate->mSpeed,
playbackRate->mPitch,
playbackRate->mStretchMode,
playbackRate->mFallbackMode);
// invalidate(); (should not require reconfigure)
}
} break;
default:
LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
}
break;
default:
LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
}
}
AudioMixer::setParameter()
函数支持设置的参数包括如下这些:
-
TRACK
- CHANNEL_MASK:为混音器的单个音频源设置通道掩码格式;
- MAIN_BUFFER:为混音器的单个音频源设置混音输出数据缓冲区,这个值本不应该属于单个音频源;
-
AUX_BUFFER:由
AudioMixerBase::setParameter()
处理; - FORMAT:为混音器的单个音频源设置采样格式;
- MIXER_FORMAT:为混音器的单个音频源设置混音器采样格式,这个值本不应该属于单个音频源;
- MIXER_CHANNEL_MASK:为混音器的单个音频源设置混音器通道掩码格式,这个值本不应该属于单个音频源;
- HAPTIC_ENABLED:为混音器的单个音频源设置触觉开关;
- HAPTIC_INTENSITY:为混音器的单个音频源设置触觉强度;
- HAPTIC_MAX_AMPLITUDE:为混音器的单个音频源设置触觉最大振幅;
-
RESAMPLE/RAMP_VOLUME/VOLUME:由
AudioMixerBase::setParameter()
处理; -
TIMESTRETCH
- PLAYBACK_RATE:为混音器的单个音频源设置播放速率。
AudioMixerBase::setParameter()
函数定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
) 如下:
// Base-class parameter setter for track `name`. Handles TRACK buffer/format/
// mask parameters, RESAMPLE (sample rate / reset / remove), and VOLUME /
// RAMP_VOLUME (per-channel volumes and the aux send level). `value` is
// reinterpreted per-parameter. Unknown targets/params are fatal. Pipeline-
// affecting changes call invalidate() to re-derive the mixing hooks.
void AudioMixerBase::setParameter(int name, int target, int param, void *value)
{
LOG_ALWAYS_FATAL_IF(!exists(name), "invalid name: %d", name);
const std::shared_ptr<TrackBase> &track = mTracks[name];
// value is a type-punned argument: decode it both as an int and a buffer ptr
int valueInt = static_cast<int>(reinterpret_cast<uintptr_t>(value));
int32_t *valueBuf = reinterpret_cast<int32_t*>(value);
switch (target) {
case TRACK:
switch (param) {
case CHANNEL_MASK: {
const audio_channel_mask_t trackChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
if (setChannelMasks(name, trackChannelMask, track->mMixerChannelMask)) {
ALOGV("setParameter(TRACK, CHANNEL_MASK, %x)", trackChannelMask);
invalidate();
}
} break;
case MAIN_BUFFER:
if (track->mainBuffer != valueBuf) {
track->mainBuffer = valueBuf;
ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
invalidate();
}
break;
case AUX_BUFFER:
if (track->auxBuffer != valueBuf) {
track->auxBuffer = valueBuf;
ALOGV("setParameter(TRACK, AUX_BUFFER, %p)", valueBuf);
invalidate();
}
break;
case FORMAT: {
audio_format_t format = static_cast<audio_format_t>(valueInt);
if (track->mFormat != format) {
ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
track->mFormat = format;
ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
invalidate();
}
} break;
case MIXER_FORMAT: {
audio_format_t format = static_cast<audio_format_t>(valueInt);
if (track->mMixerFormat != format) {
track->mMixerFormat = format;
ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
}
} break;
case MIXER_CHANNEL_MASK: {
const audio_channel_mask_t mixerChannelMask =
static_cast<audio_channel_mask_t>(valueInt);
if (setChannelMasks(name, track->channelMask, mixerChannelMask)) {
ALOGV("setParameter(TRACK, MIXER_CHANNEL_MASK, %#x)", mixerChannelMask);
invalidate();
}
} break;
default:
LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
}
break;
case RESAMPLE:
switch (param) {
case SAMPLE_RATE:
ALOG_ASSERT(valueInt > 0, "bad sample rate %d", valueInt);
// may lazily create the resampler; mSampleRate is the mixer output rate
if (track->setResampler(uint32_t(valueInt), mSampleRate)) {
ALOGV("setParameter(RESAMPLE, SAMPLE_RATE, %u)",
uint32_t(valueInt));
invalidate();
}
break;
case RESET:
track->resetResampler();
invalidate();
break;
case REMOVE:
// drop the resampler and fall back to the mixer output rate
track->mResampler.reset(nullptr);
track->sampleRate = mSampleRate;
invalidate();
break;
default:
LOG_ALWAYS_FATAL("setParameter resample: bad param %d", param);
}
break;
case RAMP_VOLUME:
case VOLUME:
switch (param) {
case AUXLEVEL:
// ramp length is mFrameCount for RAMP_VOLUME, 0 (immediate) for VOLUME
if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
target == RAMP_VOLUME ? mFrameCount : 0,
&track->auxLevel, &track->prevAuxLevel, &track->auxInc,
&track->mAuxLevel, &track->mPrevAuxLevel, &track->mAuxInc)) {
ALOGV("setParameter(%s, AUXLEVEL: %04x)",
target == VOLUME ? "VOLUME" : "RAMP_VOLUME", track->auxLevel);
invalidate();
}
break;
default:
// VOLUME0..VOLUME0+MAX_NUM_VOLUMES-1 select the per-channel volume
if ((unsigned)param >= VOLUME0 && (unsigned)param < VOLUME0 + MAX_NUM_VOLUMES) {
if (setVolumeRampVariables(*reinterpret_cast<float*>(value),
target == RAMP_VOLUME ? mFrameCount : 0,
&track->volume[param - VOLUME0],
&track->prevVolume[param - VOLUME0],
&track->volumeInc[param - VOLUME0],
&track->mVolume[param - VOLUME0],
&track->mPrevVolume[param - VOLUME0],
&track->mVolumeInc[param - VOLUME0])) {
ALOGV("setParameter(%s, VOLUME%d: %04x)",
target == VOLUME ? "VOLUME" : "RAMP_VOLUME", param - VOLUME0,
track->volume[param - VOLUME0]);
invalidate();
}
} else {
LOG_ALWAYS_FATAL("setParameter volume: bad param %d", param);
}
}
break;
default:
LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
}
}
AudioMixerBase::setParameter()
函数支持设置的参数包括如下这些:
-
TRACK
-
CHANNEL_MASK/MAIN_BUFFER/FORMAT/MIXER_FORMAT/MIXER_CHANNEL_MASK:
AudioMixer::setParameter()
函数中也处理了这些参数; - AUX_BUFFER:为混音器的单个音频源设置辅助数据缓冲区;
-
CHANNEL_MASK/MAIN_BUFFER/FORMAT/MIXER_FORMAT/MIXER_CHANNEL_MASK:
-
RESAMPLE
- SAMPLE_RATE:为混音器的单个音频源设置重采样器的输入采样率,也是音频源本身的采样率,输出采样率为混音器的输出采样率;
- RESET:复位混音器的单个音频源的重采样器;
- REMOVE:移除混音器的单个音频源的重采样器;
-
RAMP_VOLUME/VOLUME
- AUXLEVEL:为混音器的单个音频源设置辅助数据音量;
- default:为混音器的单个音频源设置音量。
这些参数,不管其本身在逻辑上是不是属于单个音频源,但全部都是设置给单个音频源的。修改单个音频源的部分参数,如通道掩码格式、采样格式和倍速播放等,可能会触发单个音频源音频数据处理管线的重新配置。许多参数的改变,都会导致混音器 invalidate()
的执行,会重新初始化混音过程,包括音频源的分组等。
这里来更细致地看下为混音器的单个音频源设置重采样器的输入采样率,这个设置由 AudioMixerBase::TrackBase::setResampler()
函数处理,该函数定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
) 如下:
// Record the track's (input) sample rate and lazily create a resampler the
// first time the track rate differs from the device (mixer output) rate.
// Returns true if the stored sample rate changed (caller should invalidate).
// Note: once created, the resampler is never destroyed here, even if the
// rates later become equal again.
bool AudioMixerBase::TrackBase::setResampler(uint32_t trackSampleRate, uint32_t devSampleRate)
{
if (trackSampleRate != devSampleRate || mResampler.get() != nullptr) {
if (sampleRate != trackSampleRate) {
sampleRate = trackSampleRate;
if (mResampler.get() == nullptr) {
ALOGV("Creating resampler from track %d Hz to device %d Hz",
trackSampleRate, devSampleRate);
AudioResampler::src_quality quality;
// force lowest quality level resampler if use case isn't music or video
// FIXME this is flawed for dynamic sample rates, as we choose the resampler
// quality level based on the initial ratio, but that could change later.
// Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
if (isMusicRate(trackSampleRate)) {
quality = AudioResampler::DEFAULT_QUALITY;
} else {
quality = AudioResampler::DYN_LOW_QUALITY;
}
// TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
// but if none exists, it is the channel count (1 for mono).
const int resamplerChannelCount = getOutputChannelCount();
ALOGVV("Creating resampler:"
" format(%#x) channels(%d) devSampleRate(%u) quality(%d)\n",
mMixerInFormat, resamplerChannelCount, devSampleRate, quality);
// only the output rate is passed at creation; the input rate is set
// later via setSampleRate() in the mixing hooks
mResampler.reset(AudioResampler::create(
mMixerInFormat,
resamplerChannelCount,
devSampleRate, quality));
}
return true;
}
}
return false;
}
AudioMixerBase::TrackBase::setResampler()
函数接收两个参数,一个是音频源的采样率,另一个是设备采样率,也即重采样器的输出采样率。AudioMixerBase::TrackBase::setResampler()
函数被调用时,设备采样率传入的始终是混音器的输出采样率,对于特定的混音器,它将保持不变,也就是说 AudioMixerBase::TrackBase::setResampler()
函数的设备采样率参数将是一个固定的值。
当音频源的采样率和设备采样率不同时,表示需要重采样器。如果重采样器不存在,会创建重采样器。创建重采样器时,只需传入输出采样率即可,不需要传入输入采样率。创建重采样器时,输入的 quality 参数根据音频源的采样率,即输入采样率计算获得。创建重采样器时,输入的音频数据通道数从音频源配置获得。
对于设置的音频源的采样率,在重采样器创建之后,只是做简单的记录。在混音操作处理过程中,AudioMixerBase::TrackBase
的相关函数会为重采样器设置输入采样率。
AudioMixerBase::TrackBase::setResampler()
函数的实现存在一些问题:
- 从接口来看,通道数和 quality 等参数在重采样器创建之后是可能改变的,但 Android 的重采样器在创建之后,这些参数都不再改变;
- 传入基本上固定的设备采样率参数显得十分蹩脚。
另外,重采样一旦开启就不会再关闭:即使音频源采样率最初与设备采样率不同,后来又变得与设备采样率相同,此时重采样器也不会被销毁。
设置音量由 setVolumeRampVariables()
函数完成,该函数定义 (位于 frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
) 如下:
// Sanitizes a requested float volume, optionally configures a linear volume
// ramp toward it, and mirrors everything into the parallel fixed-point volume
// state (integer target is U4.12; integer ramp position/increment are U4.28).
//
// Parameters:
//   newVolume      - requested float gain; clamped/sanitized to [0, UNITY_GAIN_FLOAT]
//   ramp           - ramp length (presumably in frames; increments below are
//                    per-step deltas computed as (target - prev) / ramp —
//                    TODO confirm units against the caller); 0 = set directly
//   pIntSetVolume  - out: target integer volume (U4.12)
//   pIntPrevVolume - in/out: current integer ramp position (U4.28)
//   pIntVolumeInc  - out: per-step integer ramp increment (0 when not ramping)
//   pSetVolume     - in/out: previously set / new target float volume
//   pPrevVolume    - in/out: current float ramp position
//   pVolumeInc     - out: per-step float ramp increment (0 when not ramping)
//
// Returns true if the volume changed (caller must pick up the new values),
// false if newVolume is bit-identical to the previously set float volume.
static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
// check floating point volume to see if it is identical to the previously
// set volume.
// We do not use a tolerance here (and reject changes too small)
// as it may be confusing to use a different value than the one set.
// If the resulting volume is too small to ramp, it is a direct set of the volume.
if (newVolume == *pSetVolume) {
return false;
}
if (newVolume < 0) {
newVolume = 0; // should not have negative volumes
} else {
// Classify the (non-negative) value and coerce pathological inputs
// (NaN, subnormal, infinity) into the safe range [0, UNITY_GAIN_FLOAT].
// Note: NaN reaches this branch because (NaN < 0) is false.
switch (fpclassify(newVolume)) {
case FP_SUBNORMAL:
case FP_NAN:
newVolume = 0;
break;
case FP_ZERO:
break; // zero volume is fine
case FP_INFINITE:
// Infinite volume could be handled consistently since
// floating point math saturates at infinities,
// but we limit volume to unity gain float.
// ramp = 0; break;
//
newVolume = AudioMixerBase::UNITY_GAIN_FLOAT;
break;
case FP_NORMAL:
default:
// Floating point does not have problems with overflow wrap
// that integer has. However, we limit the volume to
// unity gain here.
// TODO: Revisit the volume limitation and perhaps parameterize.
if (newVolume > AudioMixerBase::UNITY_GAIN_FLOAT) {
newVolume = AudioMixerBase::UNITY_GAIN_FLOAT;
}
break;
}
}
// set floating point volume ramp
if (ramp != 0) {
// when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
// is no computational mismatch; hence equality is checked here.
ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
" prev:%f set_to:%f", *pPrevVolume, *pSetVolume);
const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
// could be inf, cannot be nan, subnormal
const float maxv = std::max(newVolume, *pPrevVolume);
if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
&& maxv + inc != maxv) { // inc must make forward progress
*pVolumeInc = inc;
// ramp is set now.
// Note: if newVolume is 0, then near the end of the ramp,
// it may be possible that the ramped volume may be subnormal or
// temporarily negative by a small amount or subnormal due to floating
// point inaccuracies.
} else {
ramp = 0; // ramp not allowed
}
}
// compute and check integer volume, no need to check negative values
// The integer volume is limited to "unity_gain" to avoid wrapping and other
// audio artifacts, so it never reaches the range limit of U4.28.
// We safely use signed 16 and 32 bit integers here.
const float scaledVolume = newVolume * AudioMixerBase::UNITY_GAIN_INT; // not neg, subnormal, nan
const int32_t intVolume = (scaledVolume >= (float)AudioMixerBase::UNITY_GAIN_INT) ?
AudioMixerBase::UNITY_GAIN_INT : (int32_t)scaledVolume;
// set integer volume ramp
// (ramp may already have been cleared above if the float increment was unusable,
// keeping float and integer ramp state consistent)
if (ramp != 0) {
// integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
// when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
// is no computational mismatch; hence equality is checked here.
ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
" prev:%d set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
if (inc != 0) { // inc must make forward progress
*pIntVolumeInc = inc;
} else {
ramp = 0; // ramp not allowed
}
}
// if no ramp, or ramp not allowed, then clear float and integer increments
if (ramp == 0) {
*pVolumeInc = 0;
*pPrevVolume = newVolume;
*pIntVolumeInc = 0;
*pIntPrevVolume = intVolume << 16;
}
*pSetVolume = newVolume;
*pIntSetVolume = intVolume;
return true;
}
混音器的使用者设置 float 型的音量增益值,这个函数可以处理音量斜坡,并生成整型的音量值。
最后
回过头来,再看文章开头的问题,并基于文中内容对它们做一些回答。
如何表示要混音的一路音频源数据?
Android 的混音器用 AudioMixerBase::TrackBase
/AudioMixer::Track
表示要混音的一路音频源数据。
如何为要混音的一路音频源设置音频数据来源?
要混音的一路音频源的音频数据来源由 AudioBufferProvider
表示,AudioMixer::setBufferProvider(int name, AudioBufferProvider* bufferProvider)
函数可以为要混音的一路音频源设置音频数据来源。
如何设置混音之后的数据输出的缓冲区,或如何获得混音之后的数据?
混音之后的数据输出的缓冲区被称为 main buffer,通过 AudioMixer::setParameter(int name, int target, int param, void *value)
可以为要混音的一路音频源设置 main buffer。从 setParameter()
接口来看,main buffer 好像是特定于要混音的一路音频源的,但实际可能是给多个要混音的音频源设置相同的 main buffer。具有相同 main buffer 的音频源会放在一起混音。main buffer 由混音器的使用者维护,混音器的使用者驱动混音器执行了混音之后,从 main buffer 读取数据做进一步的处理,如丢给设备播放。
如何确定混音器的输出数据格式和配置,即是外部使用者通过提供的接口设置,还是根据各路音频源的数据格式和配置动态计算获得?
这些信息通过各种各样的设置接口设置。如采样率,在 AudioMixer
对象构造的时候传入。对于混音器的输出通道数和采样格式,与对 main buffer 的处理类似,是设置给要混音的一路音频源的。
当音频源的数据格式和配置与输出的数据格式和配置不同时,怎么做数据格式和配置的转换?
AudioMixer::Track
维护了一个音频数据处理流水线,用来执行包括音频数据格式转换、音量限制和变速播放等在内的多种音频数据处理。重采样不是 AudioMixer::Track
的音频数据处理流水线处理的,但它有一个重采样器来处理重采样。
如何为混音器添加或创建一路音频源?
AudioMixerBase::create()
函数可以用来为混音器创建并添加一路音频源。
如何修改混音器的一路音频源的参数?
AudioMixerBase::TrackBase
/AudioMixer::Track
提供的接口比较少,一般通过 AudioMixerBase::setParameter(int name, int target, int param, void *value)
函数和 AudioMixer::setParameter(int name, int target, int param, void *value)
函数设置。Android 混音器的这些组件封装得不太好,甚至可以说比较差。
如何删除混音器的一路音频源?
AudioMixerBase::disable(int name)
函数可以禁用一路音频源,但不把它从混音器中移除。AudioMixerBase::destroy(int name)
函数会将一路音频源从混音器中移除。
混音时,混音之后的数据越界了怎么处理?
简单限制上限。
混音操作如何驱动?是混音器内部起线程执行混音操作并将结果通过回调抛出去,还是使用者主动调用混音操作?
AudioMixerBase
类有个 process()
函数,混音器的使用者调用这个方法驱动混音操作的执行,这个函数执行之后,混音器的使用者从 main buffer 获得混音之后的数据。
Android 的混音器同时可以执行多组混音操作,也就是同时可以产生多个输出。
Done.