Skia now offers a performant, secure native player for JSON animations derived from the Bodymovin plugin for After Effects. It can be used on any platform where you are using Skia, including Android and iOS.
The player aims to build upon the Lottie player that is widely used for animations today, improving on performance, feature set, and platform cohesiveness for our clients. We are big fans of the Bodymovin format and, where possible, contribute back to Bodymovin/Lottie.
https://skia.org/user/modules/skottie
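As a quick orientation before diving into the internals, here is a minimal usage sketch; the file name and frame time are placeholders, while Animation::Builder::makeFromFile, seekFrameTime and render are the public skottie API:
#include "modules/skottie/include/Skottie.h"
#include "include/core/SkCanvas.h"
// Minimal sketch: load a Bodymovin/Lottie JSON and draw one frame.
void draw_one_frame(SkCanvas* canvas, double t_seconds) {
    sk_sp<skottie::Animation> animation =
            skottie::Animation::Builder().makeFromFile("anim.json");  // placeholder path
    if (!animation) {
        return;
    }
    // Advance all animators to the requested time, then render the scene graph.
    animation->seekFrameTime(t_seconds);
    const SkRect dst = SkRect::MakeSize(animation->size());
    animation->render(canvas, &dst);
}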
Some basic After Effects concepts
Skottie code structure
Parsing into a RenderTree
● AnimationBuilder::parse
● CompositionBuilder::CompositionBuilder
● CompositionBuilder::build
● LayerBuilder::buildRenderTree
AnimationBuilder::parse produces an Animation
Animation = Animation::Builder::makeFromFile(lottie_json) {
// Parse the top-level JSON fields: version, width/height, frame rate, in/out points
// parse v/w/h/fr/ip/op/...
// Parsing the JSON yields an AnimationInfo
struct AnimationInfo {
std::unique_ptr<sksg::Scene> fScene;
AnimatorScope fAnimators;
};
internal::AnimationBuilder builder(std::move(resolvedProvider), fFontMgr,
std::move(fPropertyObserver),
std::move(fLogger),
std::move(fMarkerObserver),
std::move(fPrecompInterceptor),
&fStats, size, duration, fps, fFlags,
audio_collector);
auto ainfo = builder.parse(json);
// Create the Animation object
return sk_sp<Animation>(new Animation(std::move(ainfo.fScene),
std::move(ainfo.fAnimators),
std::move(version),
size,
inPoint,
outPoint,
duration,
fps,
flags,
audio_collector));
}
AnimationBuilder::AnimationInfo AnimationBuilder::parse(const skjson::ObjectValue& jroot) {
// Parse the markers field
this->dispatchMarkers(jroot["markers"]);
// Parse the assets field
this->parseAssets(jroot["assets"]);
// Parse the fonts and chars fields
this->parseFonts(jroot["fonts"], jroot["chars"]);
// CompositionBuilder::build
// produces sksg::Group::Make(std::move(layers))
// and collects the animators
AutoScope ascope(this);
auto root = CompositionBuilder(*this, fCompSize, jroot).build(*this);
auto animators = ascope.release();
fStats->fAnimatorCount = animators.size();
// Return the AnimationInfo
return { sksg::Scene::Make(std::move(root)), std::move(animators) };
}
The CompositionBuilder constructor parses out fLayerBuilders & fCameraTransform
CompositionBuilder::CompositionBuilder(const AnimationBuilder& abuilder,
const SkSize& size,
const skjson::ObjectValue& jcomp)
: fSize(size) {
// Optional motion blur params.
if (const skjson::ObjectValue* jmb = jcomp["mb"]) {
static constexpr size_t kMaxSamplesPerFrame = 64;
fMotionBlurSamples = std::min(ParseDefault<size_t>((*jmb)["spf"], 1ul),
kMaxSamplesPerFrame);
fMotionBlurAngle = SkTPin(ParseDefault((*jmb)["sa"], 0.0f), 0.0f, 720.0f);
fMotionBlurPhase = SkTPin(ParseDefault((*jmb)["sp"], 0.0f), -360.0f, 360.0f);
}
int camera_builder_index = -1;
// Prepare layer builders.
if (const skjson::ArrayValue* jlayers = jcomp["layers"]) {
fLayerBuilders.reserve(SkToInt(jlayers->size()));
for (const skjson::ObjectValue* jlayer : *jlayers) {
if (!jlayer) continue;
const auto lbuilder_index = fLayerBuilders.size();
fLayerBuilders.emplace_back(*jlayer, fSize);
const auto& lbuilder = fLayerBuilders.back();
fLayerIndexMap.set(lbuilder.index(), lbuilder_index);
// Keep track of the camera builder.
if (lbuilder.isCamera()) {
// We only support one (first) camera for now.
if (camera_builder_index < 0) {
camera_builder_index = SkToInt(lbuilder_index);
} else {
abuilder.log(Logger::Level::kWarning, jlayer,
"Ignoring duplicate camera layer.");
}
}
}
}
// Attach a camera transform upfront, if needed (required to build
// all other 3D transform chains).
if (camera_builder_index >= 0) {
// Explicit camera.
fCameraTransform = fLayerBuilders[camera_builder_index].buildTransform(abuilder, this);
} else if (ParseDefault<int>(jcomp["ddd"], 0)) {
// Default/implicit camera when 3D layers are present.
fCameraTransform = CameraAdapter::DefaultCameraTransform(fSize);
}
}
CompositionBuilder::build produces the layers
// 1) Each LayerBuilder holds on to its layer transform.
// 2) buildRenderTree attaches the layer contents and wraps the transform
//    into a render node:
// buildRenderTree {
//     layer = sksg::TransformEffect::Make(std::move(layer), fLayerTransform);
// }
sk_sp<sksg::RenderNode> CompositionBuilder::build(const AnimationBuilder& abuilder) {
// First pass - transitively attach layer transform chains.
for (auto& lbuilder : fLayerBuilders) {
lbuilder.buildTransform(abuilder, this);
}
// Second pass - attach actual layer contents and finalize the layer render tree.
std::vector<sk_sp<sksg::RenderNode>> layers;
layers.reserve(fLayerBuilders.size());
LayerBuilder* prev_layer = nullptr;
for (auto& lbuilder : fLayerBuilders) {
if (auto layer = lbuilder.buildRenderTree(abuilder, this, prev_layer)) {
layers.push_back(std::move(layer));
}
prev_layer = &lbuilder;
}
if (layers.empty()) {
return nullptr;
}
if (layers.size() == 1) {
return std::move(layers[0]);
}
// Layers are painted in bottom->top order.
std::reverse(layers.begin(), layers.end());
layers.shrink_to_fit();
return sksg::Group::Make(std::move(layers));
}
LayerBuilder::buildRenderTree builds the RenderTree
sk_sp<sksg::RenderNode> LayerBuilder::buildRenderTree(const AnimationBuilder& abuilder,
CompositionBuilder* cbuilder,
const LayerBuilder* prev_layer) {
const AnimationBuilder::AutoPropertyTracker apt(&abuilder, fJlayer);
using LayerBuilder =
sk_sp<sksg::RenderNode> (AnimationBuilder::*)(const skjson::ObjectValue&,
AnimationBuilder::LayerInfo*) const;
// AE is annoyingly inconsistent in how effects interact with layer transforms: depending on
// the layer type, effects are applied before or after the content is transformed.
//
// Empirically, pre-rendered layers (for some loose meaning of "pre-rendered") are in the
// former category (effects are subject to transformation), while the remaining types are in
// the latter.
enum : uint32_t {
kTransformEffects = 0x01, // The layer transform also applies to its effects.
kForceSeek = 0x02, // Dispatch all seek() events even when the layer is inactive.
};
static constexpr struct {
LayerBuilder fBuilder;
uint32_t fFlags;
} gLayerBuildInfo[] = {
{ &AnimationBuilder::attachPrecompLayer, kTransformEffects }, // 'ty': 0 -> precomp
{ &AnimationBuilder::attachSolidLayer , kTransformEffects }, // 'ty': 1 -> solid
{ &AnimationBuilder::attachFootageLayer, kTransformEffects }, // 'ty': 2 -> image
{ &AnimationBuilder::attachNullLayer , 0 }, // 'ty': 3 -> null
{ &AnimationBuilder::attachShapeLayer , 0 }, // 'ty': 4 -> shape
{ &AnimationBuilder::attachTextLayer , 0 }, // 'ty': 5 -> text
{ &AnimationBuilder::attachAudioLayer , kForceSeek }, // 'ty': 6 -> audio
{ nullptr , 0 }, // 'ty': 7 -> pholderVideo
{ nullptr , 0 }, // 'ty': 8 -> imageSeq
{ &AnimationBuilder::attachFootageLayer, kTransformEffects }, // 'ty': 9 -> video
{ nullptr , 0 }, // 'ty': 10 -> pholderStill
{ nullptr , 0 }, // 'ty': 11 -> guide
{ nullptr , 0 }, // 'ty': 12 -> adjustment
{ &AnimationBuilder::attachNullLayer , 0 }, // 'ty': 13 -> camera
{ nullptr , 0 }, // 'ty': 14 -> light
};
const auto type = SkToSizeT(fType);
if (type >= SK_ARRAY_COUNT(gLayerBuildInfo)) {
return nullptr;
}
const auto& build_info = gLayerBuildInfo[type];
// Switch to the layer animator scope (which at this point holds transform-only animators).
AnimationBuilder::AutoScope ascope(&abuilder, std::move(fLayerScope));
// Potentially null.
sk_sp<sksg::RenderNode> layer;
// Build the layer content fragment.
if (build_info.fBuilder) {
layer = (abuilder.*(build_info.fBuilder))(fJlayer, &fInfo);
}
// Clip layers with explicit dimensions.
float w = 0, h = 0;
if (Parse<float>(fJlayer["w"], &w) && Parse<float>(fJlayer["h"], &h)) {
layer = sksg::ClipEffect::Make(std::move(layer),
sksg::Rect::Make(SkRect::MakeWH(w, h)),
true);
}
// AE masks, mattes and track mattes explained: https://zhuanlan.zhihu.com/p/56928976
// (A mask is a path, so effects such as audio waveform, stroke, fill or scribble
//  can be applied to it; is that supported here?)
//
// Masks apply to the current layer.
// Optional layer mask.
layer = AttachMask(fJlayer["masksProperties"], &abuilder, std::move(layer));
// Does the transform apply to effects also?
// (AE quirk: it doesn't - except for solid layers)
const auto transform_effects = (build_info.fFlags & kTransformEffects);
// Attach the transform before effects, when needed.
if (fLayerTransform && !transform_effects) {
layer = sksg::TransformEffect::Make(std::move(layer), fLayerTransform);
}
// Optional layer effects.
if (const skjson::ArrayValue* jeffects = fJlayer["ef"]) {
layer = EffectBuilder(&abuilder, fInfo.fSize, cbuilder)
.attachEffects(*jeffects, std::move(layer));
}
// Attach the transform after effects, when needed.
if (fLayerTransform && transform_effects) {
layer = sksg::TransformEffect::Make(std::move(layer), std::move(fLayerTransform));
}
// Optional layer styles.
if (const skjson::ArrayValue* jstyles = fJlayer["sy"]) {
layer = EffectBuilder(&abuilder, fInfo.fSize, cbuilder)
.attachStyles(*jstyles, std::move(layer));
}
// Optional layer opacity.
// TODO: de-dupe this "ks" lookup with matrix above.
if (const skjson::ObjectValue* jtransform = fJlayer["ks"]) {
layer = abuilder.attachOpacity(*jtransform, std::move(layer));
}
// Stash the content tree in case it is needed for later mattes.
fContentTree = layer;
if (ParseDefault<bool>(fJlayer["hd"], false)) {
layer = nullptr;
}
const auto has_animators = !abuilder.fCurrentAnimatorScope->empty();
const auto force_seek_count = build_info.fFlags & kForceSeek
? abuilder.fCurrentAnimatorScope->size()
: fTransformAnimatorCount;
sk_sp<Animator> controller = sk_make_sp<LayerController>(ascope.release(),
layer,
force_seek_count,
fInfo.fInPoint,
fInfo.fOutPoint);
// Optional motion blur.
if (layer && has_animators && this->hasMotionBlur(cbuilder)) {
// Wrap both the layer node and the controller.
auto motion_blur = MotionBlurEffect::Make(std::move(controller), std::move(layer),
cbuilder->fMotionBlurSamples,
cbuilder->fMotionBlurAngle,
cbuilder->fMotionBlurPhase);
controller = sk_make_sp<MotionBlurController>(motion_blur);
layer = std::move(motion_blur);
}
abuilder.fCurrentAnimatorScope->push_back(std::move(controller));
// Track mattes apply to the next layer.
if (ParseDefault<bool>(fJlayer["td"], false)) {
// |layer| is a track matte. We apply it as a mask to the next layer.
return nullptr;
}
// Optional matte.
size_t matte_mode;
if (prev_layer && Parse(fJlayer["tt"], &matte_mode)) {
static constexpr sksg::MaskEffect::Mode gMatteModes[] = {
sksg::MaskEffect::Mode::kAlphaNormal, // tt: 1
sksg::MaskEffect::Mode::kAlphaInvert, // tt: 2
sksg::MaskEffect::Mode::kLumaNormal, // tt: 3
sksg::MaskEffect::Mode::kLumaInvert, // tt: 4
};
if (matte_mode > 0 && matte_mode <= SK_ARRAY_COUNT(gMatteModes)) {
// The current layer is masked with the previous layer *content*.
layer = sksg::MaskEffect::Make(std::move(layer),
prev_layer->fContentTree,
gMatteModes[matte_mode - 1]);
} else {
abuilder.log(Logger::Level::kError, nullptr,
"Unknown track matte mode: %zu\n", matte_mode);
}
}
// Finally, attach an optional blend mode.
// NB: blend modes are never applied to matte sources (layer content only).
return abuilder.attachBlendMode(fJlayer, std::move(layer));
}
Skia supports local video decoding
skia/experimental/ffmpeg/SkVideoDecoder
If we were to implement this ourselves (see the sketch after this list):
- Open the URL with FFmpeg
- Convert each AVFrame into an SkBitmap for final drawing onto the canvas
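A minimal sketch of that do-it-yourself pipeline, assuming FFmpeg 5.x's demux/decode API plus libswscale for the pixel-format conversion; stream selection and error handling are simplified, and the N32/RGBA byte-order match is assumed for brevity:
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
}
#include "include/core/SkBitmap.h"
// Decode the first video frame of |url| into an SkBitmap (illustrative only).
bool decode_first_frame(const char* url, SkBitmap* dst) {
    AVFormatContext* fmt = nullptr;
    if (avformat_open_input(&fmt, url, nullptr, nullptr) < 0 ||
        avformat_find_stream_info(fmt, nullptr) < 0) {
        return false;
    }
    const AVCodec* codec = nullptr;
    const int vindex = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);
    if (vindex < 0) {
        return false;
    }
    AVCodecContext* ctx = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(ctx, fmt->streams[vindex]->codecpar);
    if (avcodec_open2(ctx, codec, nullptr) < 0) {
        return false;
    }
    AVPacket* pkt   = av_packet_alloc();
    AVFrame*  frame = av_frame_alloc();
    bool got = false;
    while (!got && av_read_frame(fmt, pkt) >= 0) {
        if (pkt->stream_index == vindex &&
            avcodec_send_packet(ctx, pkt) >= 0 &&
            avcodec_receive_frame(ctx, frame) >= 0) {
            // Convert whatever pixel format the decoder produced into RGBA and
            // write it straight into the SkBitmap's pixel memory.
            dst->allocPixels(SkImageInfo::MakeN32Premul(frame->width, frame->height));
            SwsContext* sws = sws_getContext(frame->width, frame->height,
                                             (AVPixelFormat)frame->format,
                                             frame->width, frame->height,
                                             AV_PIX_FMT_RGBA, SWS_BILINEAR,
                                             nullptr, nullptr, nullptr);
            uint8_t* dst_planes[1] = { (uint8_t*)dst->getPixels() };
            int      dst_stride[1] = { (int)dst->rowBytes() };
            sws_scale(sws, frame->data, frame->linesize, 0, frame->height,
                      dst_planes, dst_stride);
            sws_freeContext(sws);
            got = true;
        }
        av_packet_unref(pkt);
    }
    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    avformat_close_input(&fmt);
    return got;
}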
Things worth borrowing from Skia's implementation:
- Use SkData
- Use FFmpeg custom I/O to open the URL
- Check the ColorSpace when converting an AVFrame into an SkImage
sk_sp<SkData> SkData::MakeFromFILE(FILE* f) {
size_t size;
void* addr = sk_fmmap(f, &size);
if (nullptr == addr) {
return nullptr;
}
return SkData::MakeWithProc(addr, size, sk_mmap_releaseproc, reinterpret_cast<void*>(size));
}
sk_sp<SkData> SkData::MakeFromFileName(const char path[]) {
FILE* f = path ? sk_fopen(path, kRead_SkFILE_Flag) : nullptr;
if (nullptr == f) {
return nullptr;
}
auto data = MakeFromFILE(f);
sk_fclose(f);
return data;
}
void* sk_fdmmap(int fd, size_t* size) {
struct stat status;
if (0 != fstat(fd, &status)) {
return nullptr;
}
if (!S_ISREG(status.st_mode)) {
return nullptr;
}
if (!SkTFitsIn<size_t>(status.st_size)) {
return nullptr;
}
size_t fileSize = static_cast<size_t>(status.st_size);
void* addr = mmap(nullptr, fileSize, PROT_READ, MAP_PRIVATE, fd, 0);
if (MAP_FAILED == addr) {
return nullptr;
}
*size = fileSize;
return addr;
}
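The sk_mmap_releaseproc passed to MakeWithProc above is not part of the excerpt; conceptually it just unmaps the region once the SkData is destroyed. A sketch of what such a release proc looks like (treat the body as illustrative rather than a verbatim copy of Skia's helper):
#include <sys/mman.h>
// Release proc for mmap-backed SkData: the mapped length was passed through
// the context pointer, so the exact region can be unmapped here.
static void sk_mmap_releaseproc(const void* addr, void* ctx) {
    size_t length = reinterpret_cast<size_t>(ctx);
    munmap(const_cast<void*>(addr), length);
}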
This is where the mmap function is used.
https://www.zhihu.com/question/48161206
The main benefit of mmap is that it saves one memory copy. With the usual VFS read/write system calls, file contents also pass through the kernel buffer, so there is one more copy than with mmap; mmap only involves the user-space copy (a step that read/write has as well). By eliminating the copy from the Linux page cache into a user-space buffer, mmap can improve performance substantially, which is why it is often described as a zero-copy technique.
https://www.gnu.org/software/libc/manual/html_node/Memory_002dmapped-I_002fO.html
This is more efficient than read or write, as only the regions of the file that a program actually accesses are loaded. Accesses to not-yet-loaded parts of the mmapped region are handled in the same way as swapped out pages.
https://nieyong.github.io/wiki_cpu/mmap%E8%AF%A6%E8%A7%A3.html
FFmpeg custom I/O
static int skstream_read_packet(void* ctx, uint8_t* dstBuffer, int dstSize) {
SkStream* stream = (SkStream*)ctx;
int result = (int)stream->read(dstBuffer, dstSize);
if (result == 0) {
result = AVERROR_EOF;
}
return result;
}
static int64_t skstream_seek_packet(void* ctx, int64_t pos, int whence) {
SkStream* stream = (SkStream*)ctx;
switch (whence) {
case SEEK_SET:
break;
case SEEK_CUR:
pos = (int64_t)stream->getPosition() + pos;
break;
case SEEK_END:
pos = (int64_t)stream->getLength() + pos;
break;
default:
return -1;
}
return stream->seek(SkToSizeT(pos)) ? pos : -1;
}
// Excerpt: wiring the SkStream into FFmpeg through a custom AVIOContext.
{
int bufferSize = 4 * 1024;
uint8_t* buffer = (uint8_t*)av_malloc(bufferSize);
if (!buffer) {
return false;
}
fStream = std::move(stream);
fStreamCtx = avio_alloc_context(buffer, bufferSize, 0, fStream.get(),
skstream_read_packet, nullptr, skstream_seek_packet);
if (!fStreamCtx) {
av_freep(&buffer);  // av_freep takes the address of the pointer to free
this->reset();
return false;
}
fFormatCtx = avformat_alloc_context();
if (!fFormatCtx) {
this->reset();
return false;
}
fFormatCtx->pb = fStreamCtx;
int err = avformat_open_input(&fFormatCtx, nullptr, nullptr, nullptr);
if (err < 0) {
SkDebugf("avformat_open_input failed %d\n", err);
return false;
}
}
Determining the ColorSpace when converting an AVFrame into an SkImage
static SkYUVColorSpace get_yuvspace(AVColorSpace space) {
// this is pretty incomplete -- TODO: look to convert more AVColorSpaces
switch (space) {
case AVCOL_SPC_RGB: return kIdentity_SkYUVColorSpace;
case AVCOL_SPC_BT709: return kRec709_SkYUVColorSpace;
case AVCOL_SPC_SMPTE170M:
case AVCOL_SPC_SMPTE240M:
case AVCOL_SPC_BT470BG: return kRec601_SkYUVColorSpace;
default: break;
}
return kRec709_SkYUVColorSpace;
}
struct av_transfer_characteristics {
// if x < beta:  delta * x
// else:         alpha * (x^gamma)
float alpha, beta, gamma, delta;
};
// Tables extracted from vf_colorspace.c
const av_transfer_characteristics gTransfer[AVCOL_TRC_NB] = {
[AVCOL_TRC_BT709] = { 1.099, 0.018, 0.45, 4.5 },
[AVCOL_TRC_GAMMA22] = { 1.0, 0.0, 1.0 / 2.2, 0.0 },
[AVCOL_TRC_GAMMA28] = { 1.0, 0.0, 1.0 / 2.8, 0.0 },
[AVCOL_TRC_SMPTE170M] = { 1.099, 0.018, 0.45, 4.5 },
[AVCOL_TRC_SMPTE240M] = { 1.1115, 0.0228, 0.45, 4.0 },
[AVCOL_TRC_IEC61966_2_1] = { 1.055, 0.0031308, 1.0 / 2.4, 12.92 },
[AVCOL_TRC_IEC61966_2_4] = { 1.099, 0.018, 0.45, 4.5 },
[AVCOL_TRC_BT2020_10] = { 1.099, 0.018, 0.45, 4.5 },
[AVCOL_TRC_BT2020_12] = { 1.0993, 0.0181, 0.45, 4.5 },
};
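To make the four columns concrete, here is a hypothetical helper (not part of Skia or FFmpeg) that evaluates the encoding curve a table row describes; the power segment follows the usual BT.709-style form, which also subtracts (alpha - 1), a term the simplified comment above omits:
#include <cmath>
// Evaluate the opto-electronic transfer curve described by one gTransfer entry:
// a linear segment near black, a power-law segment above the breakpoint.
static float apply_transfer(const av_transfer_characteristics& tc, float x) {
    return (x < tc.beta) ? tc.delta * x
                         : tc.alpha * std::pow(x, tc.gamma) - (tc.alpha - 1.0f);
}
// Example: with the BT.709 row {1.099, 0.018, 0.45, 4.5}, a linear value of 0.5
// encodes to roughly 1.099 * 0.5^0.45 - 0.099 ≈ 0.705.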