Introduction
This article continues from <<Android 音频低延时mmap介绍(2)>> in our look at AAudio's mmap mechanism. The previous part covered the differences between shared and exclusive modes; this part covers AAudio's data-driving flow.
aaudio mmap data flow
The data-driving flow starts with createThread_l in AudioStreamInternal, which creates the thread that drives the data. It runs the following task:
static void *aaudio_callback_thread_proc(void *context)
{
AudioStreamInternal *stream = (AudioStreamInternal *)context;
//LOGD("oboe_callback_thread, stream = %p", stream);
if (stream != nullptr) {
return stream->callbackLoop();
} else {
return nullptr;
}
}
Let's start with the capture-side callbackLoop:
// Read data from the stream and pass it to the callback for processing.
void *AudioStreamInternalCapture::callbackLoop() {
aaudio_result_t result = AAUDIO_OK;
aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
if (!isDataCallbackSet()) return nullptr;
// result might be a frame count
while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
// Read audio data from stream.
int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
// This is a BLOCKING READ!
result = read(mCallbackBuffer.get(), mCallbackFrames, timeoutNanos);
if ((result != mCallbackFrames)) {
ALOGE("callbackLoop: read() returned %d", result);
if (result >= 0) {
// Only read some of the frames requested. The stream can be disconnected
// or timed out.
processCommands();
result = isDisconnected() ? AAUDIO_ERROR_DISCONNECTED : AAUDIO_ERROR_TIMEOUT;
}
maybeCallErrorCallback(result);
break;
}
// Call application using the AAudio callback interface.
callbackResult = maybeCallDataCallback(mCallbackBuffer.get(), mCallbackFrames);
if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
ALOGD("%s(): callback returned AAUDIO_CALLBACK_RESULT_STOP", __func__);
result = systemStopInternal();
break;
}
}
ALOGD("callbackLoop() exiting, result = %d, isActive() = %d",
result, (int) isActive());
return nullptr;
}
Assuming the application consumes data passively, that is, it relies on the callback, execution enters the while loop above, which drives the client side. The loop performs the following steps (a minimal sketch of the client setup that leads into this loop follows the list):
- Compute the timeout corresponding to reading mCallbackFrames frames
- Read the data from the buffer
- Deliver it to the application via the callback
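For reference, here is a minimal sketch of the application setup that ends up in this loop, using the public AAudio NDK API. Error handling is omitted, and startLowLatencyCapture and onAudioReady are illustrative names, not part of the source above:
#include <aaudio/AAudio.h>

// Invoked on the thread created by createThread_l(); for a capture stream,
// callbackLoop() fills audioData with numFrames frames before each call.
static aaudio_data_callback_result_t onAudioReady(
        AAudioStream * /*stream*/, void * /*userData*/,
        void *audioData, int32_t numFrames) {
    // Consume the captured frames here (copy them out, feed a codec, etc.).
    (void) audioData; (void) numFrames;
    return AAUDIO_CALLBACK_RESULT_CONTINUE; // keep the while loop running
}

void startLowLatencyCapture() {
    AAudioStreamBuilder *builder = nullptr;
    AAudio_createStreamBuilder(&builder);
    AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_INPUT);
    AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
    AAudioStreamBuilder_setDataCallback(builder, onAudioReady, nullptr);
    AAudioStream *stream = nullptr;
    AAudioStreamBuilder_openStream(builder, &stream);
    AAudioStream_requestStart(stream); // the callback thread is spawned around here
    AAudioStreamBuilder_delete(builder);
}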
Let's look at the first step:
int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
// Wait for at least a second or some number of callbacks to join the thread.
int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
* framesPerOperation
* AAUDIO_NANOS_PER_SECOND)
/ getSampleRate();
if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
timeoutNanoseconds = MIN_TIMEOUT_NANOS;
}
return timeoutNanoseconds;
}
Computing the timeout for mCallbackFrames amounts to taking the duration covered by one callback's worth of frames and multiplying it by a factor: for example, to read 20 ms of data the caller waits at most that multiple of 20 ms, subject to a lower bound.
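As a concrete illustration (the constant values below are assumptions; MIN_TIMEOUT_OPERATIONS and MIN_TIMEOUT_NANOS are AOSP-internal):
#include <cstdint>

constexpr int64_t kNanosPerSecond = 1'000'000'000LL;
constexpr int64_t kMinTimeoutOperations = 4;          // assumed value of MIN_TIMEOUT_OPERATIONS
constexpr int64_t kMinTimeoutNanos = kNanosPerSecond; // assumed value of MIN_TIMEOUT_NANOS (1 s)

int64_t timeoutForFrames(int32_t framesPerOperation, int32_t sampleRate) {
    // 960 frames @ 48 kHz is a 20 ms callback, so 4 * 20 ms = 80 ms,
    // which is then raised to the 1 s floor.
    int64_t timeoutNanos =
            kMinTimeoutOperations * framesPerOperation * kNanosPerSecond / sampleRate;
    return (timeoutNanos < kMinTimeoutNanos) ? kMinTimeoutNanos : timeoutNanos;
}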
Next, the data read:
// Read the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
{
return processData(buffer, numFrames, timeoutNanoseconds);
}
Following into that function:
// Read or write the data, block if needed and timeoutNanoseconds > 0
aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
int64_t timeoutNanoseconds)
{
if (isDisconnected()) {
return AAUDIO_ERROR_DISCONNECTED;
}
if (!mInService &&
AAudioBinderClient::getInstance().getServiceLifetimeId() != getServiceLifetimeId()) {
// The service lifetime id will be changed whenever the binder died. In that case, if
// the service lifetime id from AAudioBinderClient is different from the cached one,
// returns AAUDIO_ERROR_DISCONNECTED.
// Note that only compare the service lifetime id if it is not in service as the streams
// in service will all be gone when aaudio service dies.
mClockModel.stop(AudioClock::getNanoseconds());
// Set the stream as disconnected as the service lifetime id will only change when
// the binder dies.
setDisconnected();
return AAUDIO_ERROR_DISCONNECTED;
}
const char * traceName = "aaProc";
const char * fifoName = "aaRdy";
ATRACE_BEGIN(traceName);
if (ATRACE_ENABLED()) {
int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
ATRACE_INT(fifoName, fullFrames);
}
aaudio_result_t result = AAUDIO_OK;
int32_t loopCount = 0;
uint8_t* audioData = (uint8_t*)buffer;
int64_t currentTimeNanos = AudioClock::getNanoseconds();
const int64_t entryTimeNanos = currentTimeNanos;
const int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
int32_t framesLeft = numFrames;
// Loop until all the data has been processed or until a timeout occurs.
while (framesLeft > 0) {
// The call to processDataNow() will not block. It will just process as much as it can.
int64_t wakeTimeNanos = 0;
aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
currentTimeNanos, &wakeTimeNanos);
if (framesProcessed < 0) {
result = framesProcessed;
break;
}
framesLeft -= (int32_t) framesProcessed;
audioData += framesProcessed * getBytesPerFrame();
// Should we block?
if (timeoutNanoseconds == 0) {
break; // don't block
} else if (wakeTimeNanos != 0) {
if (!mAudioEndpoint->isFreeRunning()) {
// If there is software on the other end of the FIFO then it may get delayed.
// So wake up just a little after we expect it to be ready.
wakeTimeNanos += mWakeupDelayNanos;
}
currentTimeNanos = AudioClock::getNanoseconds();
int64_t earliestWakeTime = currentTimeNanos + mMinimumSleepNanos;
// Guarantee a minimum sleep time.
if (wakeTimeNanos < earliestWakeTime) {
wakeTimeNanos = earliestWakeTime;
}
if (wakeTimeNanos > deadlineNanos) {
// If we time out, just return the framesWritten so far.
ALOGW("processData(): entered at %lld nanos, currently %lld",
(long long) entryTimeNanos, (long long) currentTimeNanos);
ALOGW("processData(): TIMEOUT after %lld nanos",
(long long) timeoutNanoseconds);
ALOGW("processData(): wakeTime = %lld, deadline = %lld nanos",
(long long) wakeTimeNanos, (long long) deadlineNanos);
ALOGW("processData(): past deadline by %d micros",
(int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
mClockModel.dump();
mAudioEndpoint->dump();
break;
}
if (ATRACE_ENABLED()) {
int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
ATRACE_INT(fifoName, fullFrames);
int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
ATRACE_INT("aaSlpNs", (int32_t)sleepForNanos);
}
AudioClock::sleepUntilNanoTime(wakeTimeNanos);
currentTimeNanos = AudioClock::getNanoseconds();
}
}
if (ATRACE_ENABLED()) {
int32_t fullFrames = mAudioEndpoint->getFullFramesAvailable();
ATRACE_INT(fifoName, fullFrames);
}
// return error or framesProcessed
(void) loopCount;
ATRACE_END();
return (result < 0) ? result : numFrames - framesLeft;
}
It contains the following logic:
- If the peer service (audioserver) has ever crashed, return a disconnected error
- Read the data from the buffer, sleeping between bursts until all frames are processed or the deadline passes (a condensed sketch of this pattern follows the list)
- Record systrace counters so developers can see the buffer level in real time in systrace
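Here is a condensed, self-contained sketch of processData()'s drain-then-sleep pattern. The helpers and values are illustrative stand-ins, not AOSP code: process whatever is available without blocking, then sleep until the model predicts the next burst, never sleeping less than a minimum and never past the deadline.
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <thread>

static int64_t nowNanos() {
    using namespace std::chrono;
    return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch()).count();
}

// Stand-in for processDataNow(): pretend up to 48 frames arrive per millisecond
// and predict the next useful wake-up time one millisecond from now.
static int32_t processNow(int32_t framesLeft, int64_t *wakeTimeNanos) {
    *wakeTimeNanos = nowNanos() + 1'000'000;
    return std::min<int32_t>(framesLeft, 48);
}

int32_t processDataSketch(int32_t numFrames, int64_t timeoutNanos) {
    const int64_t deadlineNanos = nowNanos() + timeoutNanos;
    const int64_t minSleepNanos = 100'000; // assumed floor, like mMinimumSleepNanos
    int32_t framesLeft = numFrames;
    while (framesLeft > 0) {
        int64_t wakeTimeNanos = 0;
        framesLeft -= processNow(framesLeft, &wakeTimeNanos);
        if (framesLeft <= 0 || timeoutNanos == 0) break;  // done, or non-blocking mode
        wakeTimeNanos = std::max(wakeTimeNanos, nowNanos() + minSleepNanos);
        if (wakeTimeNanos > deadlineNanos) break;         // would exceed the timeout
        std::this_thread::sleep_for(std::chrono::nanoseconds(wakeTimeNanos - nowNanos()));
    }
    return numFrames - framesLeft; // frames actually processed
}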
Next, let's look at processDataNow:
// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
int64_t currentNanoTime, int64_t *wakeTimePtr) {
aaudio_result_t result = processCommands();
if (result != AAUDIO_OK) {
return result;
}
const char *traceName = "aaRdNow";
ATRACE_BEGIN(traceName);
if (mClockModel.isStarting()) {
// Still haven't got any timestamps from server.
// Keep waiting until we get some valid timestamps then start writing to the
// current buffer position.
ALOGD("processDataNow() wait for valid timestamps");
// Sleep very briefly and hope we get a timestamp soon.
*wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
ATRACE_END();
return 0;
}
// If we have gotten this far then we have at least one timestamp from server.
if (mAudioEndpoint->isFreeRunning()) {
//ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
// Update data queue based on the timing model.
// Jitter in the DSP can cause late writes to the FIFO.
// This might be caused by resampling.
// We want to read the FIFO after the latest possible time
// that the DSP could have written the data.
int64_t estimatedRemoteCounter = mClockModel.convertLatestTimeToPosition(currentNanoTime);
// TODO refactor, maybe use setRemoteCounter()
mAudioEndpoint->setDataWriteCounter(estimatedRemoteCounter);
}
// This code assumes that we have already received valid timestamps.
if (mNeedCatchUp.isRequested()) {
// Catch an MMAP pointer that is already advancing.
// This will avoid initial underruns caused by a slow cold start.
advanceClientToMatchServerPosition(0 /*serverMargin*/);
mNeedCatchUp.acknowledge();
}
// If the capture buffer is full beyond capacity then consider it an overrun.
// For shared streams, the xRunCount is passed up from the service.
if (mAudioEndpoint->isFreeRunning()
&& mAudioEndpoint->getFullFramesAvailable() > mAudioEndpoint->getBufferCapacityInFrames()) {
mXRunCount++;
if (ATRACE_ENABLED()) {
ATRACE_INT("aaOverRuns", mXRunCount);
}
}
// Read some data from the buffer.
//ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
//ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
// numFrames, framesProcessed);
if (ATRACE_ENABLED()) {
ATRACE_INT("aaRead", framesProcessed);
}
// Calculate an ideal time to wake up.
if (wakeTimePtr != nullptr && framesProcessed >= 0) {
// By default wake up a few milliseconds from now. // TODO review
int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
aaudio_stream_state_t state = getState();
//ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
// AAudio_convertStreamStateToText(state));
switch (state) {
case AAUDIO_STREAM_STATE_OPEN:
case AAUDIO_STREAM_STATE_STARTING:
break;
case AAUDIO_STREAM_STATE_STARTED:
{
// When do we expect the next write burst to occur?
// Calculate frame position based off of the readCounter because
// the writeCounter might have just advanced in the background,
// causing us to sleep until a later burst.
const int64_t nextPosition = mAudioEndpoint->getDataReadCounter() +
getDeviceFramesPerBurst();
wakeTime = mClockModel.convertPositionToLatestTime(nextPosition);
}
break;
default:
break;
}
*wakeTimePtr = wakeTime;
}
ATRACE_END();
return framesProcessed;
}
This is where the client interacts with the server side. It covers the following flow:
- Process commands coming from the server
- Synchronize read/write positions with the server (a simplified model of the clock interpolation behind this is sketched below)
- Read the captured data
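For the second item, processDataNow relies on mClockModel (an IsochronousClockModel) to translate between frame positions and time, as in convertLatestTimeToPosition and convertPositionToLatestTime above. Below is a minimal sketch of the underlying linear interpolation, assuming a single (position, time) marker and a fixed sample rate; it is an illustration, not the actual AOSP implementation:
#include <cstdint>

struct SimpleClockModel {
    int64_t markerPosition = 0;   // frames, from a TIMESTAMP_SERVICE message
    int64_t markerTimeNanos = 0;  // CLOCK_MONOTONIC nanos of that position
    int32_t sampleRate = 48000;

    // Estimate how far the DSP has advanced by some time.
    int64_t convertTimeToPosition(int64_t timeNanos) const {
        return markerPosition
                + (timeNanos - markerTimeNanos) * sampleRate / 1'000'000'000LL;
    }
    // Estimate when the DSP will reach a given frame position.
    int64_t convertPositionToTime(int64_t position) const {
        return markerTimeNanos
                + (position - markerPosition) * 1'000'000'000LL / sampleRate;
    }
};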
Let's go through them one by one:
// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
aaudio_result_t result = AAUDIO_OK;
while (result == AAUDIO_OK) {
AAudioServiceMessage message;
if (!mAudioEndpoint) {
break;
}
if (mAudioEndpoint->readUpCommand(&message) != 1) {
break; // no command this time, no problem
}
switch (message.what) {
case AAudioServiceMessage::code::TIMESTAMP_SERVICE:
result = onTimestampService(&message);
break;
case AAudioServiceMessage::code::TIMESTAMP_HARDWARE:
result = onTimestampHardware(&message);
break;
case AAudioServiceMessage::code::EVENT:
result = onEventFromServer(&message);
break;
default:
ALOGE("%s - unrecognized message.what = %d", __func__, (int) message.what);
result = AAUDIO_ERROR_INTERNAL;
break;
}
}
return result;
}
As mentioned in an earlier part, when an mmap stream is created the application receives two shared-memory fds: one for commands and one for data. What happens here is reading commands from the first one. So when does the server send commands? When the server opens the stream, it starts a dedicated command thread:
aaudio_result_t AAudioServiceStreamBase::open(const aaudio::AAudioStreamRequest &request) {
...
// Make sure this object does not get deleted before the run() method
// can protect it by making a strong pointer.
mCommandQueue.startWaiting();
mThreadEnabled = true;
incStrong(nullptr); // See run() method.
result = mCommandThread.start(this);
}
mCommandThread is that command thread; next, the start logic:
void AAudioThread::dispatch() {
if (mRunnable != nullptr) {
mRunnable->run();
} else {
run();
}
}
aaudio_result_t AAudioThread::start(Runnable *runnable) {
if (mHasThread) {
ALOGE("start() - mHasThread already true");
return AAUDIO_ERROR_INVALID_STATE;
}
// mRunnable will be read by the new thread when it starts. A std::thread is created.
mRunnable = runnable;
mHasThread = true;
mThread = std::thread(&AAudioThread::dispatch, this);
return AAUDIO_OK;
}
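As a side note, here is a condensed, self-contained sketch of this dispatch pattern (an illustration, not AOSP code): start() stores the Runnable and spawns a std::thread whose entry point forwards to it, so subclasses only implement run().
#include <thread>

class Runnable {
public:
    virtual ~Runnable() = default;
    virtual void run() = 0;
};

class SimpleThread {
public:
    void start(Runnable *runnable) {
        mRunnable = runnable;
        // The new thread enters dispatch(), which forwards to the Runnable.
        mThread = std::thread(&SimpleThread::dispatch, this);
    }
    void join() { if (mThread.joinable()) mThread.join(); }
private:
    void dispatch() { if (mRunnable != nullptr) mRunnable->run(); }
    Runnable *mRunnable = nullptr;
    std::thread mThread;
};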
Since AAudioServiceStreamBase passes this to AAudioThread, the Runnable is the AAudioServiceStreamBase object itself, so the run() that executes is the logic in AAudioServiceStreamBase:
void AAudioServiceStreamBase::run() {
ALOGD("%s() %s entering >>>>>>>>>>>>>> COMMANDS", __func__, getTypeText());
// Hold onto the ref counted stream until the end.
android::sp<AAudioServiceStreamBase> holdStream(this);
TimestampScheduler timestampScheduler;
int64_t nextTimestampReportTime;
int64_t nextDataReportTime;
// When to try to enter standby.
int64_t standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
// Balance the incStrong from when the thread was launched.
holdStream->decStrong(nullptr);
// Taking mLock while starting the thread. All the operation must be able to
// run with holding the lock.
std::scoped_lock<std::mutex> _l(mLock);
int32_t loopCount = 0;
while (mThreadEnabled.load()) {
loopCount++;
int64_t timeoutNanos = -1; // wait forever
if (isDisconnected_l() || isIdle_l()) {
if (isStandbyImplemented() && !isStandby_l()) {
// If not in standby mode, wait until standby time.
timeoutNanos = standbyTime - AudioClock::getNanoseconds();
timeoutNanos = std::max<int64_t>(0, timeoutNanos);
}
// Otherwise, keep `timeoutNanos` as -1 to wait forever until next command.
} else if (isRunning()) {
timeoutNanos = std::min(nextTimestampReportTime, nextDataReportTime)
- AudioClock::getNanoseconds();
timeoutNanos = std::max<int64_t>(0, timeoutNanos);
}
auto command = mCommandQueue.waitForCommand(timeoutNanos);
if (!mThreadEnabled) {
// Break the loop if the thread is disabled.
break;
}
// Is it time to send timestamps?
if (isRunning() && !isDisconnected_l()) {
auto currentTimestamp = AudioClock::getNanoseconds();
if (currentTimestamp >= nextDataReportTime) {
reportData_l();
nextDataReportTime = nextDataReportTime_l();
}
if (currentTimestamp >= nextTimestampReportTime) {
// It is time to update timestamp.
if (sendCurrentTimestamp_l() != AAUDIO_OK) {
ALOGE("Failed to send current timestamp, stop updating timestamp");
disconnect_l();
}
nextTimestampReportTime = timestampScheduler.nextAbsoluteTime();
}
}
...
}
There are two command queues here. mCommandQueue is internal to the server; it exists to avoid thread-safety issues and never crosses the process boundary. mUpMessageQueue is the one used to send commands to the application side. Let's first look at how timestamps are synchronized; the rest of this method handles the server's internal command dispatch, which was covered earlier and works much the same way.
As shown above, the loop first checks nextDataReportTime: when the current time passes it, reportData_l runs and nextDataReportTime is refreshed. First, reportData_l:
void AAudioServiceStreamMMAP::reportData_l() {
sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
if (endpoint == nullptr) {
ALOGE("%s() has no endpoint", __func__);
return;
}
sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
return serviceEndpointMMAP->reportData();
}
Note that this does not notify the application side; it reports to the HAL layer:
void AAudioServiceEndpointMMAP::reportData() {
const std::lock_guard<std::mutex> lock(mMmapStreamLock);
if (mMmapStream == nullptr) {
// This must not happen
ALOGE("%s() invalid state, mmap stream is not initialized", __func__);
return;
}
auto fifo = mAudioDataWrapper->getFifoBuffer();
if (fifo == nullptr) {
ALOGE("%s() fifo buffer is not initialized, cannot report data", __func__);
return;
}
WrappingBuffer wrappingBuffer;
fifo_frames_t framesAvailable = fifo->getFullDataAvailable(&wrappingBuffer);
for (size_t i = 0; i < WrappingBuffer::SIZE; ++i) {
if (wrappingBuffer.numFrames[i] > 0) {
mMmapStream->reportData(wrappingBuffer.data[i], wrappingBuffer.numFrames[i]);
}
}
fifo->advanceReadIndex(framesAvailable);
}
mAudioDataWrapper wraps the memory shared with the HAL, and WrappingBuffer describes the readable data inside it. Since the shared memory is used as a ring buffer, one logically contiguous run of data may be split into two segments, and WrappingBuffer represents the two segments separately (sketched after the code below). From this we can infer that this logic reports the available data to the HAL, and, as we will confirm shortly, that it applies only to playback:
status_t MmapPlaybackThread::reportData(const void* buffer, size_t frameCount) {
// Send to MelProcessor for sound dose measurement.
auto processor = mMelProcessor.load();
if (processor) {
processor->process(buffer, frameCount * mFrameSize);
}
return NO_ERROR;
}
The mMelProcessor here computes MEL (momentary exposure level) for sound-dose measurement.
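Back to the WrappingBuffer mentioned above, here is a sketch of why it has two parts. This is an illustrative reconstruction, not the AOSP FifoBuffer code: a contiguous run of frames in a ring buffer may wrap past the end of the backing store, so the readable data is exposed as up to two {pointer, frameCount} regions.
#include <algorithm>
#include <cstddef>
#include <cstdint>

struct WrappingView {
    void    *data[2];
    int32_t  numFrames[2];
};

// Compute the readable regions of a ring buffer holding `capacity` frames.
void getReadableRegions(uint8_t *base, int32_t capacity, int32_t bytesPerFrame,
                        int64_t readIndex, int64_t writeIndex, WrappingView *view) {
    int32_t full = (int32_t)(writeIndex - readIndex);     // frames waiting to be read
    int32_t readOffset = (int32_t)(readIndex % capacity); // where the reader is in the buffer
    int32_t firstPart = std::min(full, capacity - readOffset);
    view->data[0] = base + (size_t)readOffset * bytesPerFrame;
    view->numFrames[0] = firstPart;
    view->data[1] = base;                 // wrapped part restarts at the base
    view->numFrames[1] = full - firstPart; // 0 if nothing wrapped
}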
Next, nextDataReportTime_l:
int64_t AAudioServiceStreamMMAP::nextDataReportTime_l() {
sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
if (endpoint == nullptr) {
ALOGE("%s() has no endpoint", __func__);
return std::numeric_limits<int64_t>::max();
}
sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
return serviceEndpointMMAP->nextDataReportTime();
}
This again forwards to service-internal logic:
int64_t AAudioServiceEndpointMMAP::nextDataReportTime() {
return getDirection() == AAUDIO_DIRECTION_OUTPUT
? AudioClock::getNanoseconds() + mDataReportOffsetNanos
: std::numeric_limits<int64_t>::max();
}
This also confirms the earlier inference that it applies only to playback (output): the next report time is the current time plus mDataReportOffsetNanos, and that offset is fixed when the stream is opened:
// If the position is not updated while the timestamp is updated for more than a certain amount,
// the timestamp reported from the HAL may not be accurate. Here, a timestamp grace period is
// set as 5 burst size. We may want to update this value if there is any report from OEMs saying
// that is too short.
static constexpr int kTimestampGraceBurstCount = 5;
mTimestampGracePeriodMs = ((int64_t) kTimestampGraceBurstCount * mFramesPerBurst
* AAUDIO_MILLIS_PER_SECOND) / getSampleRate();
mDataReportOffsetNanos = ((int64_t)mTimestampGracePeriodMs) * AAUDIO_NANOS_PER_MILLISECOND;
In other words, the offset corresponds to the duration of 5 bursts; if no position update arrives within that window, the HAL may be misbehaving. A worked example of the arithmetic follows.
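For instance (burst size and sample rate assumed for illustration): with a 96-frame burst at 48 kHz, one burst is 2 ms, so the grace period is 5 * 2 ms = 10 ms.
#include <cstdint>

constexpr int64_t kMillisPerSecond = 1000;
constexpr int64_t kNanosPerMillisecond = 1'000'000;

int64_t dataReportOffsetNanos(int32_t framesPerBurst, int32_t sampleRate) {
    const int64_t gracePeriodMs =
            (int64_t)5 /* kTimestampGraceBurstCount */ * framesPerBurst
            * kMillisPerSecond / sampleRate;
    return gracePeriodMs * kNanosPerMillisecond; // 96 frames @ 48 kHz -> 10'000'000 ns
}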
Now back to AAudioServiceStreamBase and sendCurrentTimestamp_l:
aaudio_result_t AAudioServiceStreamBase::sendCurrentTimestamp_l() {
AAudioServiceMessage command;
// It is not worth filling up the queue with timestamps.
// That can cause the stream to get suspended.
// So just drop the timestamp if the queue is getting full.
if (isUpMessageQueueBusy()) {
return AAUDIO_OK;
}
// Send a timestamp for the clock model.
aaudio_result_t result = getFreeRunningPosition_l(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
ALOGV("%s() SERVICE %8lld at %lld", __func__,
(long long) command.timestamp.position,
(long long) command.timestamp.timestamp);
command.what = AAudioServiceMessage::code::TIMESTAMP_SERVICE;
result = writeUpMessageQueue(&command);
if (result == AAUDIO_OK) {
// Send a hardware timestamp for presentation time.
result = getHardwareTimestamp_l(&command.timestamp.position,
&command.timestamp.timestamp);
if (result == AAUDIO_OK) {
ALOGV("%s() HARDWARE %8lld at %lld", __func__,
(long long) command.timestamp.position,
(long long) command.timestamp.timestamp);
command.what = AAudioServiceMessage::code::TIMESTAMP_HARDWARE;
result = writeUpMessageQueue(&command);
}
}
}
if (result == AAUDIO_ERROR_UNAVAILABLE) { // TODO review best error code
result = AAUDIO_OK; // just not available yet, try again later
}
return result;
}
Here we can see the TIMESTAMP_SERVICE and TIMESTAMP_HARDWARE commands being sent. So what information does TIMESTAMP_SERVICE carry?
// Get free-running DSP or DMA hardware position from the HAL.
aaudio_result_t AAudioServiceStreamMMAP::getFreeRunningPosition_l(int64_t *positionFrames,
int64_t *timeNanos) {
sp<AAudioServiceEndpoint> endpoint = mServiceEndpointWeak.promote();
if (endpoint == nullptr) {
ALOGE("%s() has no endpoint", __func__);
return AAUDIO_ERROR_INVALID_STATE;
}
sp<AAudioServiceEndpointMMAP> serviceEndpointMMAP =
static_cast<AAudioServiceEndpointMMAP *>(endpoint.get());
aaudio_result_t result = serviceEndpointMMAP->getFreeRunningPosition(positionFrames, timeNanos);
if (result == AAUDIO_OK) {
Timestamp timestamp(*positionFrames, *timeNanos);
mAtomicStreamTimestamp.write(timestamp);
*positionFrames = timestamp.getPosition();
*timeNanos = timestamp.getNanoseconds();
} else if (result != AAUDIO_ERROR_UNAVAILABLE) {
disconnect_l();
}
return result;
}
This tries to get the free-running DSP/DMA hardware position from the HAL:
// Get free-running DSP or DMA hardware position from the HAL.
aaudio_result_t AAudioServiceEndpointMMAP::getFreeRunningPosition(int64_t *positionFrames,
int64_t *timeNanos) {
const std::lock_guard<std::mutex> lock(mMmapStreamLock);
if (mMmapStream == nullptr) {
ALOGW("%s(): called after mMmapStream set to NULL", __func__);
return AAUDIO_ERROR_NULL;
}
struct audio_mmap_position position;
const status_t status = mMmapStream->getMmapPosition(&position);
ALOGV("%s() status= %d, pos = %d, nanos = %lld\n",
__func__, status, position.position_frames, (long long) position.time_nanoseconds);
const aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
if (result == AAUDIO_ERROR_UNAVAILABLE) {
ALOGW("%s(): getMmapPosition() has no position data available", __func__);
} else if (result != AAUDIO_OK) {
ALOGE("%s(): getMmapPosition() returned status %d", __func__, status);
} else {
// Convert 32-bit position to 64-bit position.
mFramesTransferred.update32(position.position_frames);
*positionFrames = mFramesTransferred.get();
*timeNanos = position.time_nanoseconds;
}
return result;
}
Here the current read/write position and its timestamp are queried from the HAL, and the wrapping 32-bit hardware position is extended to a 64-bit one via mFramesTransferred.update32() (sketched below).
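A sketch of what update32() does conceptually; this is a simplified reconstruction, not the real AOSP MonotonicCounter class. It extends a wrapping 32-bit hardware counter into a monotonically increasing 64-bit position:
#include <cstdint>

class MonotonicCounter64 {
public:
    void update32(uint32_t counter32) {
        // Interpret the 32-bit delta as signed so wrap-around is handled,
        // as long as successive updates differ by less than 2^31 frames.
        int32_t delta = (int32_t)(counter32 - mLast32);
        mValue64 += delta;
        mLast32 = counter32;
    }
    int64_t get() const { return mValue64; }
private:
    uint32_t mLast32 = 0;
    int64_t  mValue64 = 0;
};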
The corresponding HAL implementation is:
static int out_get_mmap_position(const struct audio_stream_out *stream,
struct audio_mmap_position *position)
{
int ret = 0;
struct stream_out *out = (struct stream_out *)stream;
ALOGVV("%s", __func__);
if (position == NULL) {
return -EINVAL;
}
lock_output_stream(out);
if (out->usecase != USECASE_AUDIO_PLAYBACK_MMAP ||
out->pcm == NULL) {
ret = -ENOSYS;
goto exit;
}
struct timespec ts = { 0, 0 };
ret = pcm_mmap_get_hw_ptr(out->pcm, (unsigned int *)&position->position_frames, &ts);
if (ret < 0) {
ALOGE("%s: %s", __func__, pcm_get_error(out->pcm));
goto exit;
}
position->time_nanoseconds = audio_utils_ns_from_timespec(&ts)
+ out->mmap_time_offset_nanos;
exit:
pthread_mutex_unlock(&out->lock);
return ret;
}
From there we reach tinyalsa:
/* Returns current read/write position in the mmap buffer with associated time stamp. */
int pcm_mmap_get_hw_ptr(struct pcm* pcm, unsigned int *hw_ptr, struct timespec *tstamp)
{
int rc;
if (pcm == NULL || hw_ptr == NULL || tstamp == NULL)
return oops(pcm, EINVAL, "pcm %p, hw_ptr %p, tstamp %p", pcm, hw_ptr, tstamp);
if (!pcm_is_ready(pcm))
return oops(pcm, errno, "pcm_is_ready failed");
rc = pcm_sync_ptr(pcm, SNDRV_PCM_SYNC_PTR_HWSYNC);
if (rc < 0)
return oops(pcm, errno, "pcm_sync_ptr failed");
if (pcm->mmap_status == NULL)
return oops(pcm, EINVAL, "pcm %p, mmap_status is NULL", pcm);
if ((pcm->mmap_status->state != PCM_STATE_RUNNING) &&
(pcm->mmap_status->state != PCM_STATE_DRAINING))
return oops(pcm, ENOSYS, "invalid stream state %d", pcm->mmap_status->state);
*tstamp = pcm->mmap_status->tstamp;
if (tstamp->tv_sec == 0 && tstamp->tv_nsec == 0)
return oops(pcm, errno, "invalid time stamp");
*hw_ptr = pcm->mmap_status->hw_ptr;
return 0;
}
The hw_ptr here is the position where mmap data is read and written.