MediaSender
status_t MediaSender::initAsync(
1) new 一个 TS 打包对象(TSPacketizer),将音视频 track 加入进去;实际的 TS 打包对象是在 PlaybackSession 里创建的
mTSPacketizer = new TSPacketizer(flags);
status_t err = OK;
for (size_t i = 0; i < mTrackInfos.size(); ++i) {
TrackInfo *info = &mTrackInfos.editItemAt(i);
ssize_t packetizerTrackIndex =
mTSPacketizer->addTrack(info->mFormat);
2)new RTPsender,发送
sp<AMessage> notify = new AMessage(kWhatSenderNotify, this);
notify->setInt32("generation", mGeneration);
mTSSender = new RTPSender(mNetSession, notify);
looper()->registerHandler(mTSSender);
RTPSender 比较简单 以188字节分包TS流,交由anetworksession发送,然后还有RR (丢包率) APP(平均延迟,最大延迟)等RTCP反馈
在mediasender中
MediaSender::queueAccessUnit(
先通过1) ts打包accessUnit
status_t err = packetizeAccessUnit(
minTrackIndex, accessUnit, &tsPackets); //HDCP加密是在打包里面做的
2)然后交由rtpsender发送
err = mTSSender->queueBuffer(
tsPackets,
33 /* packetType */,
RTPSender::PACKETIZATION_TRANSPORT_STREAM);
MediaSender是被 Converter::kWhatAccessUnit事件触发的
void WifiDisplaySource::PlaybackSession::onMessageReceived(
const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatConverterNotify:
if (what == Converter::kWhatAccessUnit) {
sp<ABuffer> accessUnit;
CHECK(msg->findBuffer("accessUnit", &accessUnit));
const sp<Track> &track = mTracks.valueFor(trackIndex);
status_t err = mMediaSender->queueAccessUnit(
track->mediaSenderTrackIndex(),
accessUnit);
只需要建一个mediasender,rtpsender,TSPacketizer即可
============================================================================================================TSPacketizer
Codec-specific数据也可以被包含在传递给configure方法的格式信息(MediaFormat)中,在ByteBuffer条目中以"csd-0", "csd-1"等key标记。这些keys一直包含在通过MediaExtractor获得的Audio Track or Video Track的MediaFormat中。一旦调用start()方法,MediaFormat中的Codec-specific数据会自动提交给编解码器;你不能显式地提交这些数据。如果MediaFormat中不包含编解码器指定的数据,你可以根据格式要求,按照正确的顺序使用指定数目的缓存来提交codec-specific数据。在H264 AVC编码格式下,你也可以连接所有的codec-specific数据并作为一个单独的codec-config buffer提交。
// Builds a new access unit consisting of every codec-specific-data (CSD)
// chunk for this track, in order, followed by the payload of |accessUnit|.
// The original buffer is left untouched; the concatenation is returned.
sp<ABuffer> TSPacketizer::Track::prependCSD(
        const sp<ABuffer> &accessUnit) const {
    // Total bytes of CSD that will be placed in front of the payload.
    size_t csdTotalSize = 0;
    for (size_t i = 0; i < mCSD.size(); ++i) {
        csdTotalSize += mCSD.itemAt(i)->size();
    }

    sp<ABuffer> result = new ABuffer(csdTotalSize + accessUnit->size());

    // Copy each CSD chunk, advancing the write cursor as we go.
    uint8_t *dst = result->data();
    for (size_t i = 0; i < mCSD.size(); ++i) {
        const sp<ABuffer> &csd = mCSD.itemAt(i);
        memcpy(dst, csd->data(), csd->size());
        dst += csd->size();
    }

    // Finally append the access unit payload after all CSD chunks.
    memcpy(dst, accessUnit->data(), accessUnit->size());

    return result;
}
IDR帧需要CSD,
if (track->isH264() && (flags & PREPEND_SPS_PPS_TO_IDR_FRAMES)
&& IsIDR(accessUnit)) {
// prepend codec specific data, i.e. SPS and PPS.
accessUnit = track->prependCSD(accessUnit);
} else if (track->isAAC() && track->lacksADTSHeader()) {
CHECK(!(flags & IS_ENCRYPTED));
accessUnit = track->prependADTSHeader(accessUnit);
}
TS打包里面的PTS
是从accessUnit 里面拿到的,不需要DTS,因为没有B帧
int64_t timeUs;
CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
rtpTime 是 RTPSender::queueTSPackets() 在发送时获取的当前时间。一个 TS 包应该是单纯的视频包或单纯的音频包。
int64_t nowUs = ALooper::GetNowUs();
uint32_t rtpTime = (nowUs * 9) / 100ll;
IPB帧的不同:
I frame:自身可以通过视频解压算法解压成一张单独的完整的图片。
P frame:需要参考其前面的一个I frame 或者B frame来生成一张完整的图片。
B frame:则要参考其前一个I或者P帧及其后面的一个P帧来生成一张完整的图片。
两个I frame之间形成一个GOP,在x264中同时可以通过参数来设定bf的大小,即:I 和p或者两个P之间B的数量。
通过上述基本可以说明如果有B frame 存在的情况下一个GOP的最后一个frame一定是P.
DTS和PTS的不同:
DTS主要用于视频的解码,在解码阶段使用.PTS主要用于视频的同步和输出.在display的时候使用.在没有B frame的情况下.DTS和PTS的输出顺序是一样的.
=====================================================================================
status_t WifiDisplaySource::PlaybackSession::addSource(
//创建了2个线程
sp<ALooper> pullLooper = new ALooper;
pullLooper->setName("pull_looper");
pullLooper->start(
false /* runOnCallingThread */,
false /* canCallJava */,
PRIORITY_AUDIO);
sp<ALooper> codecLooper = new ALooper;
codecLooper->setName("codec_looper");
codecLooper->start(
false /* runOnCallingThread */,
false /* canCallJava */,
PRIORITY_AUDIO);
//使用codeclooper线程
sp<Converter> converter = new Converter(notify, codecLooper, format);
//使用pull
sp<MediaPuller> puller = new MediaPuller(source, notify);
pullLooper->registerHandler(puller);
sp<Track> track = new Track(
notify, pullLooper, codecLooper, puller, converter);
looper()->registerHandler(track);
mTracks.add(trackIndex, track);
//最后在addSource处关联起mediasender
ssize_t mediaSenderTrackIndex =
mMediaSender->addTrack(converter->getOutputFormat(), flags);
CHECK_GE(mediaSenderTrackIndex, 0);
track->setMediaSenderTrackIndex(mediaSenderTrackIndex);
}
视频新建2个Looper 音频也需要新建2个Looper
=========================================================
struct WifiDisplaySource::PlaybackSession::Track : public AHandler { 是playbacksession子对象
包含 List<sp<ABuffer> > mQueuedAccessUnits;
sp<RepeaterSource> mRepeaterSource;
List<sp<ABuffer> > mQueuedOutputBuffers;
queueAccessUnit() dequeueAccessUnit()操作mQueuedAccessUnits
// Appends |accessUnit| to the tail of this track's pending access-unit queue.
void WifiDisplaySource::PlaybackSession::Track::queueAccessUnit(
        const sp<ABuffer> &accessUnit) {
    mQueuedAccessUnits.insert(mQueuedAccessUnits.end(), accessUnit);
}
// Removes and returns the oldest queued access unit, or NULL when the
// queue is empty. A queued NULL entry is treated as a programming error.
sp<ABuffer> WifiDisplaySource::PlaybackSession::Track::dequeueAccessUnit() {
    if (mQueuedAccessUnits.empty()) {
        return NULL;
    }

    sp<ABuffer> head = mQueuedAccessUnits.front();
    CHECK(head != NULL);

    mQueuedAccessUnits.pop_front();
    return head;
}
操作mQueuedOutputBuffers List
// Appends an encoded output buffer to the tail of the output queue and
// stamps the time of the enqueue for later bookkeeping.
void WifiDisplaySource::PlaybackSession::Track::queueOutputBuffer(
        const sp<ABuffer> &accessUnit) {
    mQueuedOutputBuffers.insert(mQueuedOutputBuffers.end(), accessUnit);
    mLastOutputBufferQueuedTimeUs = ALooper::GetNowUs();
}
// Removes and returns the oldest queued output buffer. Unlike
// dequeueAccessUnit(), calling this on an empty queue is a hard error.
sp<ABuffer> WifiDisplaySource::PlaybackSession::Track::dequeueOutputBuffer() {
    CHECK(!mQueuedOutputBuffers.empty());

    sp<ABuffer> head = mQueuedOutputBuffers.front();
    mQueuedOutputBuffers.pop_front();

    return head;
}
===================
截屏数据读取
1)Repeatersource
不停触发kWhatRead事件,从 mSource = SurfaceMediaSource 中读取到buffer,然后broadcast
void RepeaterSource::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatRead:
{
status_t err = mSource->read(&buffer);
mBuffer = buffer;
mCondition.broadcast();
if (err == OK) {
postRead();
2) RepeaterSource::read()里面会等待从surfacemediasource读到数据后,才结束wait,
将mBuffer 指针赋给入参MediaBuffer **buffer,这样mediapuller就获取到数据了,然后下一步才可以将数据传给codec
status_t RepeaterSource::read(
MediaBuffer **buffer, const ReadOptions *options) {
if (mStartTimeUs < 0ll) {
Mutex::Autolock autoLock(mLock);
while ((mLastBufferUpdateUs < 0ll || mBuffer == NULL)
&& mResult == OK) {
mCondition.wait(mLock);
}
mBuffer->add_ref();
*buffer = mBuffer;
3) MediaPuller 拿到surfaceMediaSource的Abuffer数据
sp<MediaPuller> puller = new MediaPuller(source, notify);
source = new RepeaterSource(source, framesPerSecond);
void MediaPuller::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatPull:
status_t err = mSource->read(&mbuf); //这实际是repeatersource的阻塞等待
sp<ABuffer> accessUnit = new ABuffer(mbuf->range_length());
memcpy(accessUnit->data(),
(const uint8_t *)mbuf->data() + mbuf->range_offset(),
mbuf->range_length()); //将mbuf数据重新拷贝到新accessUnit
notify->setInt32("what", kWhatAccessUnit);
notify->setBuffer("accessUnit", accessUnit);
notify->post();
4) kWhatAccessUnit 将发往Converter,放入mInputBufferQueue.push_back(accessUnit);
然后status_t Converter::feedEncoderInputBuffers() {
将mInputBufferQueue 中buffer内容拷贝到codec 的mEncoderInputBuffers
mEncoder->queueInputBuffer(
bufferIndex, 0, (buffer == NULL) ? 0 : buffer->size(),
timeUs, flags); 让codec去获取进行编码
这样一个截屏到编码的流程
编码后到发送是另外的线程循环
==========================================
WifiDisplay 创建,可以看出是新起了一个Looper线程
// Constructs the Wi-Fi Display (Miracast) source endpoint.
// A dedicated looper thread ("wfd_looper") is created here to host the
// WifiDisplaySource handler, so the display source runs off the caller's
// thread.
RemoteDisplay::RemoteDisplay(
const String16 &opPackageName,
const sp<IRemoteDisplayClient> &client,
const char *iface)
: mLooper(new ALooper),
mNetSession(new ANetworkSession) {
mLooper->setName("wfd_looper");
// The handler is registered before either the network session or the
// looper is started, so no message can reach an unregistered handler.
mSource = new WifiDisplaySource(opPackageName, mNetSession, client);
mLooper->registerHandler(mSource);
mNetSession->start();
mLooper->start();
// Finally start the source on the given network interface.
mSource->start(iface);
}