AudioRecord startRecording and the RecordThread
The start-recording call chain, beginning at the Android application layer:
JNI android_media_AudioRecord_start ---> mAudioRecord->start ---> (entering the AudioFlinger process via Binder) RecordHandle->start ---> RecordTrack->start ---> RecordThread->start ---> (recording-thread stage) RecordThread->threadLoop. After one lap we are back in RecordThread, so let's go straight to RecordThread::start.
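For reference, the two intermediate hops are thin wrappers. An abridged sketch of them (based on AOSP Tracks.cpp; exact signatures vary a little between Android versions):
status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
        int triggerSession) {
    // Binder entry point: simply forwards to the RecordTrack it wraps.
    return mRecordTrack->start((AudioSystem::sync_event_t) event, triggerSession);
}
status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
        int triggerSession)
{
    // Promote the weak reference to the owning RecordThread and delegate to it.
    sp<ThreadBase> thread = mThread.promote();
    if (thread == 0) {
        return BAD_VALUE;
    }
    RecordThread *recordThread = (RecordThread *) thread.get();
    return recordThread->start(this, event, triggerSession);
}
Now to RecordThread::start itself: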
status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
AudioSystem::sync_event_t event,
int triggerSession)
{
ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
sp<ThreadBase> strongMe = this;
status_t status = NO_ERROR;
/* in the common case the event is SYNC_EVENT_NONE */
if (event == AudioSystem::SYNC_EVENT_NONE) {
recordTrack->clearSyncStartEvent();
} else if (event != AudioSystem::SYNC_EVENT_SAME) {
......
}
{
// This section is a rendezvous between binder thread executing start() and RecordThread
AutoMutex lock(mLock);
if (mActiveTracks.indexOf(recordTrack) >= 0) {
/* If this recordTrack has already been started and is currently pausing,
just switch it back to ACTIVE and return. */
if (recordTrack->mState == TrackBase::PAUSING) {
ALOGV("active record track PAUSING -> ACTIVE");
recordTrack->mState = TrackBase::ACTIVE;
} else {
ALOGV("active record track state %d", recordTrack->mState);
}
return status;
}
// TODO consider other ways of handling this, such as changing the state to :STARTING and
// adding the track to mActiveTracks after returning from AudioSystem::startInput(),
// or using a separate command thread
recordTrack->mState = TrackBase::STARTING_1;
mActiveTracks.add(recordTrack); // remember this recordTrack as an active track
mActiveTracksGen++;
status_t status = NO_ERROR;
if (recordTrack->isExternalTrack()) {
mLock.unlock();
status = AudioSystem::startInput(mId, (audio_session_t)recordTrack->sessionId());
mLock.lock();
// FIXME should verify that recordTrack is still in mActiveTracks
if (status != NO_ERROR) {
mActiveTracks.remove(recordTrack);
mActiveTracksGen++;
recordTrack->clearSyncStartEvent();
ALOGV("RecordThread::start error %d", status);
return status;
}
}
// Catch up with current buffer indices if thread is already running.
// This is what makes a new client discard all buffered data. If the track's mRsmpInFront
// was initialized to some value closer to the thread's mRsmpInFront, then the track could
// see previously buffered data before it called start(), but with greater risk of overrun.
/* reset mResamplerBufferProvider and mRecordBufferConverter */
recordTrack->mResamplerBufferProvider->reset();
// clear any converter state as new data will be discontinuous
recordTrack->mRecordBufferConverter->reset();
recordTrack->mState = TrackBase::STARTING_2;
// signal thread to start
mWaitWorkCV.broadcast();
if (mActiveTracks.indexOf(recordTrack) < 0) {
ALOGV("Record failed to start");
status = BAD_VALUE;
goto startError;
}
return status;
}
startError:
if (recordTrack->isExternalTrack()) {
AudioSystem::stopInput(mId, (audio_session_t)recordTrack->sessionId());
}
recordTrack->clearSyncStartEvent();
// FIXME I wonder why we do not reset the state here?
return status;
}
AudioSystem::startInput: almost everything that goes through AudioSystem ends up in AudioPolicy, and this call is handled by AudioPolicyManager::startInput(audio_io_handle_t input, audio_session_t session). From there the flow loops back into AudioFlinger::createAudioPatch, which builds an AudioPatch connecting a source port to a sink port; for recording, the source is the input device (for example the built-in mic) and the sink is the input mix owned by this RecordThread (a sketch of such a patch follows below). Once the patch is connected and the track state has been updated, mWaitWorkCV.broadcast() wakes the recording thread, and at that point we can look at RecordThread->threadLoop.
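To make the AudioPatch idea concrete, here is a conceptual sketch of the patch that startInput ultimately asks AudioFlinger::createAudioPatch to create. This is illustration only, not the actual AudioPolicyManager code; the struct and enum names come from system/audio.h, while the surrounding function is hypothetical.
#include <string.h>
#include <system/audio.h>

// Conceptual sketch: one source port (the input device) and one sink port
// (the record mix identified by this RecordThread's io handle).
static void buildRecordPatchSketch(audio_io_handle_t input, audio_devices_t device)
{
    struct audio_patch patch;
    memset(&patch, 0, sizeof(patch));

    // Source port: the physical input device, e.g. AUDIO_DEVICE_IN_BUILTIN_MIC.
    patch.num_sources = 1;
    patch.sources[0].role = AUDIO_PORT_ROLE_SOURCE;
    patch.sources[0].type = AUDIO_PORT_TYPE_DEVICE;
    patch.sources[0].ext.device.type = device;

    // Sink port: the record mix, identified by the io handle of this RecordThread.
    patch.num_sinks = 1;
    patch.sinks[0].role = AUDIO_PORT_ROLE_SINK;
    patch.sinks[0].type = AUDIO_PORT_TYPE_MIX;
    patch.sinks[0].ext.mix.handle = input;

    // AudioPolicy hands a description like this to AudioFlinger::createAudioPatch(),
    // which asks the audio HAL to route the device into the input stream that the
    // RecordThread reads from.
}
With the route from device to input stream established, the RecordThread can start pulling data, which is what threadLoop does: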
bool AudioFlinger::RecordThread::threadLoop()
{
nsecs_t lastWarning = 0;
/* stop the fast capture (if any) and put the AudioStreamIn input stream into standby */
inputStandBy();
reacquire_wakelock:
sp<RecordTrack> activeTrack;
int activeTracksGen;
{
Mutex::Autolock _l(mLock);
size_t size = mActiveTracks.size();
activeTracksGen = mActiveTracksGen;
if (size > 0) {
// FIXME an arbitrary choice
activeTrack = mActiveTracks[0];
acquireWakeLock_l(activeTrack->uid());
if (size > 1) {
SortedVector<int> tmp;
for (size_t i = 0; i < size; i++) {
tmp.add(mActiveTracks[i]->uid());
}
updateWakeLockUids_l(tmp);
}
} else {
acquireWakeLock_l(-1);
}
}
// used to request a deferred sleep, to be executed later while mutex is unlocked
uint32_t sleepUs = 0;
// loop while there is work to do
for (;;) {
Vector< sp<EffectChain> > effectChains;
// sleep with mutex unlocked
if (sleepUs > 0) {
ATRACE_BEGIN("sleep");
usleep(sleepUs);
ATRACE_END();
sleepUs = 0;
}
// activeTracks accumulates a copy of a subset of mActiveTracks
Vector< sp<RecordTrack> > activeTracks;
// reference to the (first and only) active fast track
sp<RecordTrack> fastTrack;
// reference to a fast track which is about to be removed
sp<RecordTrack> fastTrackToRemove;
{ // scope for mLock
Mutex::Autolock _l(mLock);
processConfigEvents_l();
// check exitPending here because checkForNewParameters_l() and
// checkForNewParameters_l() can temporarily release mLock
if (exitPending()) {
break;
}
// if no active track(s), then standby and release wakelock
size_t size = mActiveTracks.size();
if (size == 0) {
/* If there is no active record track: put the input into standby, release the wakelock
and go to sleep; the mWaitWorkCV.broadcast() issued by start() will wake us up once
there is an active track. */
standbyIfNotAlreadyInStandby();
// exitPending() can't become true here
releaseWakeLock_l();
ALOGV("RecordThread: loop stopping");
// go to sleep
mWaitWorkCV.wait(mLock);
ALOGV("RecordThread: loop starting");
goto reacquire_wakelock;
}
if (mActiveTracksGen != activeTracksGen) {
activeTracksGen = mActiveTracksGen;
SortedVector<int> tmp;
for (size_t i = 0; i < size; i++) {
tmp.add(mActiveTracks[i]->uid());
}
updateWakeLockUids_l(tmp);
}
bool doBroadcast = false;
/* this for loop picks out the record tracks that are active or about to become active */
for (size_t i = 0; i < size; ) {
activeTrack = mActiveTracks[i];
/* first check whether the track has already been terminated; if so, clean it up */
if (activeTrack->isTerminated()) {
if (activeTrack->isFastTrack()) {
ALOG_ASSERT(fastTrackToRemove == 0);
fastTrackToRemove = activeTrack;
}
removeTrack_l(activeTrack);
mActiveTracks.remove(activeTrack);
mActiveTracksGen++;
size--;
continue;
}
TrackBase::track_state activeTrackState = activeTrack->mState;
/* state machine: right after start() the track arrives here in TrackBase::STARTING_2 */
switch (activeTrackState) {
case TrackBase::PAUSING:
mActiveTracks.remove(activeTrack);
mActiveTracksGen++;
doBroadcast = true;
size--;
continue;
case TrackBase::STARTING_1:
sleepUs = 10000;
i++;
continue;
case TrackBase::STARTING_2: //here
doBroadcast = true;
mStandby = false;
activeTrack->mState = TrackBase::ACTIVE;
break;
case TrackBase::ACTIVE:
break;
case TrackBase::IDLE:
i++;
continue;
default:
LOG_ALWAYS_FATAL("Unexpected activeTrackState %d", activeTrackState);
}
/* keep the record tracks that are in ACTIVE or STARTING_2 state */
activeTracks.add(activeTrack);
i++;
if (activeTrack->isFastTrack()) {
ALOG_ASSERT(!mFastTrackAvail);
ALOG_ASSERT(fastTrack == 0);
fastTrack = activeTrack;
}
}
if (doBroadcast) {
/* wake any thread waiting on mStartStopCond for this state change (e.g. a pending stop/pause) */
mStartStopCond.broadcast();
}
// sleep if there are no active tracks to process
if (activeTracks.size() == 0) {
if (sleepUs == 0) {
sleepUs = kRecordThreadSleepUs;
}
continue;
}
sleepUs = 0;
lockEffectChains_l(effectChains);
}
// thread mutex is now unlocked, mActiveTracks unknown, activeTracks.size() > 0
size_t size = effectChains.size();
for (size_t i = 0; i < size; i++) {
// thread mutex is not locked, but effect chain is locked
effectChains[i]->process_l();
}
// Push a new fast capture state if fast capture is not already running, or cblk change
/* only skimmed here: this pushes an updated state to the FastCapture thread */
if (mFastCapture != 0) {
FastCaptureStateQueue *sq = mFastCapture->sq();
FastCaptureState *state = sq->begin();
bool didModify = false;
FastCaptureStateQueue::block_t block = FastCaptureStateQueue::BLOCK_UNTIL_PUSHED;
if (state->mCommand != FastCaptureState::READ_WRITE /* FIXME &&
(kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)*/) {
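// If the fast capture thread is parked in COLD_IDLE, bump mFastCaptureFutex and wake it
// via the futex so it starts running; its command is then switched to READ_WRITE below.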
if (state->mCommand == FastCaptureState::COLD_IDLE) {
int32_t old = android_atomic_inc(&mFastCaptureFutex);
if (old == -1) {
(void) syscall(__NR_futex, &mFastCaptureFutex, FUTEX_WAKE_PRIVATE, 1);
}
}
state->mCommand = FastCaptureState::READ_WRITE;
didModify = true;
}
audio_track_cblk_t *cblkOld = state->mCblk;
audio_track_cblk_t *cblkNew = fastTrack != 0 ? fastTrack->cblk() : NULL;
if (cblkNew != cblkOld) {
state->mCblk = cblkNew;
// block until acked if removing a fast track
if (cblkOld != NULL) {
block = FastCaptureStateQueue::BLOCK_UNTIL_ACKED;
}
didModify = true;
}
sq->end(didModify);
if (didModify) {
sq->push(block);
}
}
// now run the fast track destructor with thread mutex unlocked
fastTrackToRemove.clear();
// Read from HAL to keep up with fastest client if multiple active tracks, not slowest one.
// Only the client(s) that are too slow will overrun. But if even the fastest client is too
// slow, then this RecordThread will overrun by not calling HAL read often enough.
// If destination is non-contiguous, first read past the nominal end of buffer, then
// copy to the right place. Permitted because mRsmpInBuffer was over-allocated.
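// mRsmpInRear is a frame counter that only ever increases; masking it with
// (mRsmpInFramesP2 - 1) turns it into the write offset inside the power-of-two
// sized resampler input buffer mRsmpInBuffer.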
int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
ssize_t framesRead;
// If an NBAIO source is present, use it to read the normal capture's data
/* two ways to get the capture data: from the NBAIO pipe (fed by FastCapture) or directly from the HAL input stream */
if (mPipeSource != 0) {
size_t framesToRead = mBufferSize / mFrameSize;
framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
framesToRead, AudioBufferProvider::kInvalidPTS);
if (framesRead == 0) {
// since pipe is non-blocking, simulate blocking input
sleepUs = (framesToRead * 1000000LL) / mSampleRate;
}
// otherwise use the HAL / AudioStreamIn directly
} else { // note mRsmpInBuffer: data read from the HAL input stream is stored in this buffer
ssize_t bytesRead = mInput->stream->read(mInput->stream,
(uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize);
if (bytesRead < 0) {
framesRead = bytesRead;
} else {
/* how many PCM frames were read */
framesRead = bytesRead / mFrameSize;
}
}
if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) {
ALOGE("read failed: framesRead=%d", framesRead);
// Force input into standby so that it tries to recover at next read attempt
inputStandBy();
sleepUs = kRecordThreadSleepUs;
}
if (framesRead <= 0) {
goto unlock;
}
ALOG_ASSERT(framesRead > 0);
if (mTeeSink != 0) {
/* the TeeSink debug feature dumps the raw PCM data to a file */
(void) mTeeSink->write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
}
// If destination is non-contiguous, we now correct for reading past end of buffer.
{ /* Wrap-around handling: the read above was allowed to run past the nominal end of the
buffer because mRsmpInBuffer is over-allocated beyond mRsmpInFramesP2 frames; any frames
that landed past that nominal end are now copied back to the start, so the buffer behaves
as a circular buffer. */
size_t part1 = mRsmpInFramesP2 - rear;
if ((size_t) framesRead > part1) {
memcpy(mRsmpInBuffer, (uint8_t*)mRsmpInBuffer + mRsmpInFramesP2 * mFrameSize,
(framesRead - part1) * mFrameSize);
}
}
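// Worked example with hypothetical numbers: if mRsmpInFramesP2 == 2048, rear == 2000 and
// framesRead == 100, then part1 == 48; the remaining 52 frames were written at offsets
// 2048..2099 of the over-allocated buffer, and the memcpy above moves them to offsets
// 0..51, completing the circular wrap-around.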
/* advance the rear frame counter by the number of frames just read */
rear = mRsmpInRear += framesRead;
size = activeTracks.size();
// loop over each active track and feed it the newly captured data
for (size_t i = 0; i < size; i++) {
activeTrack = activeTracks[i];
/* fast-mode tracks skip the conversion and client-sharing path here */
// skip fast tracks, as those are handled directly by FastCapture
if (activeTrack->isFastTrack()) {
continue;
}
// TODO: This code probably should be moved to RecordTrack.
// TODO: Update the activeTrack buffer converter in case of reconfigure.
enum {
OVERRUN_UNKNOWN,
OVERRUN_TRUE,
OVERRUN_FALSE
} overrun = OVERRUN_UNKNOWN;
// loop over getNextBuffer to handle circular sink
/* this loop does the resampling/format conversion and hands the captured PCM over to the client-side AudioRecord through the shared memory */
for (;;) {
activeTrack->mSink.frameCount = ~0;
/* note: getNextBuffer here only obtains, via the ServerProxy, a chunk of the circular, reused shared-memory buffer */
status_t status = activeTrack->getNextBuffer(&activeTrack->mSink);
size_t framesOut = activeTrack->mSink.frameCount;
LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));
// check available frames and handle overrun conditions
// if the record track isn't draining fast enough.
bool hasOverrun;
size_t framesIn;
activeTrack->mResamplerBufferProvider->sync(&framesIn, &hasOverrun);
if (hasOverrun) {
overrun = OVERRUN_TRUE;
}
if (framesOut == 0 || framesIn == 0) {
break;
}
// Don't allow framesOut to be larger than what is possible with resampling
// from framesIn.
// This isn't strictly necessary but helps limit buffer resizing in
// RecordBufferConverter. TODO: remove when no longer needed.
framesOut = min(framesOut,
destinationFramesPossible(
framesIn, mSampleRate, activeTrack->mSampleRate));
// process frames from the RecordThread buffer provider to the RecordTrack buffer
/* the converter pulls data through mResamplerBufferProvider (which actually reads from the RecordThread's mRsmpInBuffer) and converts it into activeTrack->mSink.raw */
framesOut = activeTrack->mRecordBufferConverter->convert(
activeTrack->mSink.raw, activeTrack->mResamplerBufferProvider, framesOut);
if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
overrun = OVERRUN_FALSE;
}
if (activeTrack->mFramesToDrop == 0) {
if (framesOut > 0) {
activeTrack->mSink.frameCount = framesOut;
activeTrack->releaseBuffer(&activeTrack->mSink);
}
} else {
// FIXME could do a partial drop of framesOut
if (activeTrack->mFramesToDrop > 0) {
activeTrack->mFramesToDrop -= framesOut;
if (activeTrack->mFramesToDrop <= 0) {
activeTrack->clearSyncStartEvent();
}
} else {
activeTrack->mFramesToDrop += framesOut;
if (activeTrack->mFramesToDrop >= 0 || activeTrack->mSyncStartEvent == 0 ||
activeTrack->mSyncStartEvent->isCancelled()) {
ALOGW("Synced record %s, session %d, trigger session %d",
(activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled",
activeTrack->sessionId(),
(activeTrack->mSyncStartEvent != 0) ?
activeTrack->mSyncStartEvent->triggerSession() : 0);
activeTrack->clearSyncStartEvent();
}
}
}
if (framesOut == 0) {
break;
}
}
switch (overrun) {
case OVERRUN_TRUE:
// client isn't retrieving buffers fast enough
if (!activeTrack->setOverflow()) {
nsecs_t now = systemTime();
// FIXME should lastWarning per track?
if ((now - lastWarning) > kWarningThrottleNs) {
ALOGW("RecordThread: buffer overflow");
lastWarning = now;
}
}
break;
case OVERRUN_FALSE:
activeTrack->clearOverflow();
break;
case OVERRUN_UNKNOWN:
break;
}
}
unlock:
// enable changes in effect chain
unlockEffectChains(effectChains);
// effectChains doesn't need to be cleared, since it is cleared by destructor at scope end
}
standbyIfNotAlreadyInStandby();
{
Mutex::Autolock _l(mLock);
for (size_t i = 0; i < mTracks.size(); i++) {
sp<RecordTrack> track = mTracks[i];
track->invalidate();
}
mActiveTracks.clear();
mActiveTracksGen++;
mStartStopCond.broadcast();
}
releaseWakeLock();
ALOGV("RecordThread %p exiting", this);
return false;
}
The inline comments above cover most of it, but one question remains: we have not yet seen the write into the shared memory that the client-side AudioRecord reads. The place to look is
status_t status = activeTrack->getNextBuffer(&activeTrack->mSink);
so let's analyze that call.
status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
int64_t pts __unused)
{
ServerProxy::Buffer buf;
buf.mFrameCount = buffer->frameCount;
status_t status = mServerProxy->obtainBuffer(&buf);
buffer->frameCount = buf.mFrameCount;
buffer->raw = buf.mRaw;
if (buf.mFrameCount == 0) {
// FIXME also wake futex so that overrun is noticed more quickly
(void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
}
return status;
}
What actually runs is mServerProxy->obtainBuffer,
implemented by the ServerProxy class in /av/media/libmedia/AudioTrackShared.cpp.
The sp<IMemory> cblk passed in when the AudioRecordServerProxy was created is the shared control block that manages the circular, reused shared-memory buffer; the index updates rely on atomic operations to keep the data consistent between the two processes. The code takes some effort to read, so the walk-through here is brief.
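The core of that scheme is a single-producer/single-consumer ring buffer driven by two monotonically increasing indices. A minimal standalone sketch of the index arithmetic (hypothetical code, not from AOSP):
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>

// Hypothetical sketch of the index scheme used by audio_track_cblk_t.
// Two 32-bit counters only ever increase: 'rear' is advanced by the writer,
// 'front' by the reader. With a power-of-two capacity:
//   filled = rear - front             is correct even across 32-bit wrap-around
//   offset = counter & (capacity - 1) locates the data inside the buffer
struct RingIndices {
    std::atomic<int32_t> front;   // frames consumed so far (advanced by the reader)
    std::atomic<int32_t> rear;    // frames produced so far (advanced by the writer)
    size_t frameCount;            // capacity in frames; must be a power of two
};

// How many contiguous frames the reader may take right now, and where they start.
// This mirrors the filled/part1 computation in obtainBuffer() below.
static size_t contiguousReadable(const RingIndices& r, size_t* offset)
{
    int32_t rear  = r.rear.load(std::memory_order_acquire);   // pairs with the writer's release store
    int32_t front = r.front.load(std::memory_order_relaxed);
    size_t filled = size_t(rear - front);                      // assumes 0 <= filled <= frameCount
    size_t pos    = uint32_t(front) & (r.frameCount - 1);
    size_t part1  = r.frameCount - pos;                        // frames before the physical end
    *offset = pos;
    return std::min(filled, part1);
}
ServerProxy::obtainBuffer below applies exactly this arithmetic to the real cblk shared with the client: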
status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
{
LOG_ALWAYS_FATAL_IF(buffer == NULL || buffer->mFrameCount == 0);
if (mIsShutdown) {
goto no_init;
}
{
audio_track_cblk_t* cblk = mCblk;
// compute number of frames available to write (AudioTrack) or read (AudioRecord),
// or use previous cached value from framesReady(), with added barrier if it omits.
int32_t front;
int32_t rear;
// See notes on barriers at ClientProxy::obtainBuffer()
if (mIsOut) {
int32_t flush = cblk->u.mStreaming.mFlush;
rear = android_atomic_acquire_load(&cblk->u.mStreaming.mRear);
front = cblk->u.mStreaming.mFront;
if (flush != mFlush) {
// effectively obtain then release whatever is in the buffer
const size_t overflowBit = mFrameCountP2 << 1;
const size_t mask = overflowBit - 1;
int32_t newFront = (front & ~mask) | (flush & mask);
ssize_t filled = rear - newFront;
if (filled >= (ssize_t)overflowBit) {
// front and rear offsets span the overflow bit of the p2 mask
// so rebasing newFront on the front offset is off by the overflow bit.
// adjust newFront to match rear offset.
ALOGV("flush wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
newFront += overflowBit;
filled -= overflowBit;
}
// Rather than shutting down on a corrupt flush, just treat it as a full flush
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, "
"filled %zd=%#x",
mFlush, flush, front, rear,
(unsigned)mask, newFront, filled, (unsigned)filled);
newFront = rear;
}
mFlush = flush;
android_atomic_release_store(newFront, &cblk->u.mStreaming.mFront);
// There is no danger from a false positive, so err on the side of caution
if (true /*front != newFront*/) {
int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
if (!(old & CBLK_FUTEX_WAKE)) {
(void) syscall(__NR_futex, &cblk->mFutex,
mClientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
}
}
front = newFront;
}
} else {
front = android_atomic_acquire_load(&cblk->u.mStreaming.mFront);
rear = cblk->u.mStreaming.mRear;
}
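// front and rear are monotonically increasing frame counters shared through the cblk;
// their difference is the current fill level, valid even across 32-bit wrap-around.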
ssize_t filled = rear - front;
// pipe should not already be overfull
if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
ALOGE("Shared memory control block is corrupt (filled=%zd); shutting down", filled);
mIsShutdown = true;
}
if (mIsShutdown) {
goto no_init;
}
// don't allow filling pipe beyond the nominal size
size_t availToServer;
if (mIsOut) {
availToServer = filled;
mAvailToClient = mFrameCount - filled;
} else {
availToServer = mFrameCount - filled;
mAvailToClient = filled;
}
// 'availToServer' may be non-contiguous, so return only the first contiguous chunk
size_t part1;
if (mIsOut) {
front &= mFrameCountP2 - 1;
part1 = mFrameCountP2 - front;
} else {
rear &= mFrameCountP2 - 1;
part1 = mFrameCountP2 - rear;
}
if (part1 > availToServer) {
part1 = availToServer;
}
size_t ask = buffer->mFrameCount;
if (part1 > ask) {
part1 = ask;
}
// is assignment redundant in some cases?
buffer->mFrameCount = part1;
buffer->mRaw = part1 > 0 ?
&((char *) mBuffers)[(mIsOut ? front : rear) * mFrameSize] : NULL;
buffer->mNonContig = availToServer - part1;
// After flush(), allow releaseBuffer() on a previously obtained buffer;
// see "Acknowledge any pending flush()" in audioflinger/Tracks.cpp.
if (!ackFlush) {
mUnreleased = part1;
}
return part1 > 0 ? NO_ERROR : WOULD_BLOCK;
}
no_init:
buffer->mFrameCount = 0;
buffer->mRaw = NULL;
buffer->mNonContig = 0;
mUnreleased = 0;
return NO_INIT;
}
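obtainBuffer only hands out a pointer into the shared buffer; it is the matching releaseBuffer() (called above via activeTrack->releaseBuffer(&activeTrack->mSink)) that actually publishes the frames to the client by advancing the write index and waking the futex. A conceptual sketch of that release step, simplified from what ServerProxy::releaseBuffer in AudioTrackShared.cpp does (the real code also updates cblk->mServer, tracks mUnreleased and handles shutdown):
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>
#include <cutils/atomic.h>
#include <private/media/AudioTrackShared.h>   // audio_track_cblk_t, CBLK_FUTEX_WAKE

using namespace android;

// Conceptual sketch only, not the verbatim AOSP code.
static void releaseBufferSketch(audio_track_cblk_t* cblk, size_t stepCount,
                                bool isOut, bool clientInServer)
{
    if (stepCount == 0) {
        return;
    }
    if (isOut) {
        // Playback: the server consumed frames, so advance the read index (mFront).
        int32_t front = cblk->u.mStreaming.mFront;
        android_atomic_release_store(front + (int32_t)stepCount, &cblk->u.mStreaming.mFront);
    } else {
        // Capture: the server produced frames, so advance the write index (mRear).
        // This is the moment new PCM data becomes visible to the client AudioRecord.
        int32_t rear = cblk->u.mStreaming.mRear;
        android_atomic_release_store(rear + (int32_t)stepCount, &cblk->u.mStreaming.mRear);
    }
    // Wake a client that may be blocked on the cblk futex in ClientProxy::obtainBuffer().
    int32_t old = android_atomic_or(CBLK_FUTEX_WAKE, &cblk->mFutex);
    if (!(old & CBLK_FUTEX_WAKE)) {
        (void) syscall(__NR_futex, &cblk->mFutex,
                clientInServer ? FUTEX_WAKE_PRIVATE : FUTEX_WAKE, 1);
    }
}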
AudioRecord read: fetching the captured audio data
Since the server side has a ServerProxy, the client side naturally has a ClientProxy, and AudioRecord reads the memory shared through IMemory directly across processes.
At the Java level the call is simply:
int readSize = audioRecord.read(recordData, 0, recordbuffsize);
Tracing that call down, the work ends up in:
status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested, struct timespec *elapsed)
which is likewise implemented by the ClientProxy class in /av/media/libmedia/AudioTrackShared.cpp.
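On the client side, AudioRecord::read() is essentially a loop around ClientProxy::obtainBuffer() and releaseBuffer(): obtain a contiguous chunk of the shared buffer, copy it into the caller's array, release it so the read index advances, and repeat. A simplified, hypothetical sketch of that loop (the real code is AudioRecord::read() in frameworks/av/media/libmedia/AudioRecord.cpp, which adds restore handling and timeouts):
#include <string.h>
#include <stdint.h>
#include <utils/Errors.h>
#include <private/media/AudioTrackShared.h>   // ClientProxy, AudioRecordClientProxy

using namespace android;

// Hypothetical sketch of the client-side read loop; 'proxy' stands for the
// AudioRecordClientProxy built on the same shared cblk/IMemory as the ServerProxy above.
static ssize_t readLoopSketch(AudioRecordClientProxy& proxy, void* dst,
                              size_t userSize, size_t frameSize)
{
    uint8_t* out = (uint8_t*)dst;
    ssize_t bytesRead = 0;
    while (userSize >= frameSize) {
        ClientProxy::Buffer buf;
        buf.mFrameCount = userSize / frameSize;
        // Blocks on the cblk futex until the RecordThread's releaseBuffer()
        // has advanced mRear, i.e. until captured frames are available.
        status_t err = proxy.obtainBuffer(&buf, &ClientProxy::kForever);
        if (err != NO_ERROR || buf.mFrameCount == 0) {
            break;
        }
        size_t bytes = buf.mFrameCount * frameSize;
        memcpy(out, buf.mRaw, bytes);      // copy out of the shared memory region
        out += bytes;
        userSize -= bytes;
        bytesRead += bytes;
        proxy.releaseBuffer(&buf);         // advances mFront so the server can reuse the space
    }
    return bytesRead;
}
The value returned by audioRecord.read() at the Java layer is this count, propagated back up through JNI.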