Background: this started as a follow-me robot project. Automatic person detection and following were already working when the customer added an extra requirement: the follow video also has to be pushed to a server in real time. Customer first, so let's get to it. For this requirement, the task boils down to using FFmpeg to push the video stream to an RTMP server.
First, make sure the FFmpeg and OpenCV libraries are installed. Below is a C++ example showing how to do this:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
// Encode one I420 frame (as produced by cv::cvtColor with COLOR_BGR2YUV_I420)
// and write the resulting packets to the RTMP output.
void matToRtmp(const cv::Mat& frame, AVCodecContext* codecContext, AVFrame* avFrame,
               AVPacket* avPacket, AVFormatContext* formatContext) {
    // Copy the contiguous Y, U and V planes into the frame's own buffers.
    av_frame_make_writable(avFrame);
    const int w = codecContext->width, h = codecContext->height;
    const uint8_t* src = frame.data;
    av_image_copy_plane(avFrame->data[0], avFrame->linesize[0], src, w, w, h);
    av_image_copy_plane(avFrame->data[1], avFrame->linesize[1], src + w * h, w / 2, w / 2, h / 2);
    av_image_copy_plane(avFrame->data[2], avFrame->linesize[2], src + w * h * 5 / 4, w / 2, w / 2, h / 2);
    avFrame->pts++;

    int ret = avcodec_send_frame(codecContext, avFrame);
    if (ret < 0) {
        std::cerr << "Error sending frame to codec context." << std::endl;
        exit(1);
    }
    // The encoder may buffer frames; drain every packet that is ready.
    while (ret >= 0) {
        ret = avcodec_receive_packet(codecContext, avPacket);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;  // no packet available yet; not an error
        if (ret < 0) {
            std::cerr << "Error receiving packet from codec context." << std::endl;
            exit(1);
        }
        avPacket->stream_index = 0;
        // Rescale timestamps from the encoder time base to the stream time base.
        av_packet_rescale_ts(avPacket, codecContext->time_base,
                             formatContext->streams[0]->time_base);
        av_interleaved_write_frame(formatContext, avPacket);
        av_packet_unref(avPacket);
    }
}
int main() {
    // Initialize FFmpeg (av_register_all() is only needed on FFmpeg versions before 4.0)
    av_register_all();
    avformat_network_init();
    // Open the input video file or camera
    const std::string inputVideo = "input_video.mp4";
    cv::VideoCapture cap(inputVideo);
    if (!cap.isOpened()) {
        std::cerr << "Could not open the video file: " << inputVideo << std::endl;
        return -1;
    }
    // Set the output RTMP server URL
    const std::string rtmpUrl = "rtmp://your_rtmp_server/live/stream";
    // Pick the FLV container (the format carried over RTMP) and allocate the output context
    AVOutputFormat* outputFormat = av_guess_format("flv", nullptr, nullptr);
    AVFormatContext* formatContext = nullptr;
    avformat_alloc_output_context2(&formatContext, outputFormat, nullptr, rtmpUrl.c_str());
    // Set up the H.264 encoder and its context
    // (requires an FFmpeg build with an H.264 encoder such as libx264)
    AVCodec* videoCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    AVStream* videoStream = avformat_new_stream(formatContext, videoCodec);
    AVCodecContext* codecContext = avcodec_alloc_context3(videoCodec);
    codecContext->width = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_WIDTH));
    codecContext->height = static_cast<int>(cap.get(cv::CAP_PROP_FRAME_HEIGHT));
    codecContext->time_base = {1, 25};
    codecContext->framerate = {25, 1};
    codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
    codecContext->bit_rate = 400000;
    codecContext->gop_size = 12;
    // FLV needs global headers (SPS/PPS in extradata rather than in-band)
    if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
        codecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    avcodec_open2(codecContext, videoCodec, nullptr);
    avcodec_parameters_from_context(videoStream->codecpar, codecContext);
    videoStream->time_base = codecContext->time_base;
    // Open the connection to the RTMP server
    if (!(formatContext->oformat->flags & AVFMT_NOFILE)) {
        int ret = avio_open(&formatContext->pb, rtmpUrl.c_str(), AVIO_FLAG_WRITE);
        if (ret < 0) {
            std::cerr << "Could not open the output URL: " << rtmpUrl << std::endl;
            return -1;
        }
    }
    // Write the stream header
    avformat_write_header(formatContext, nullptr);
    // Allocate the reusable AVFrame and AVPacket
    AVFrame* avFrame = av_frame_alloc();
    avFrame->format = codecContext->pix_fmt;
    avFrame->width = codecContext->width;
    avFrame->height = codecContext->height;
    av_frame_get_buffer(avFrame, 0);
    avFrame->pts = 0;  // incremented once per encoded frame in matToRtmp()
    // avcodec_receive_packet() allocates the packet payload, so no av_new_packet() is needed
    AVPacket* avPacket = av_packet_alloc();
    // Read BGR frames, convert them to I420 and push them to the RTMP stream
    cv::Mat frame;
    while (cap.read(frame)) {
        cv::Mat yuvFrame;
        cv::cvtColor(frame, yuvFrame, cv::COLOR_BGR2YUV_I420);
        matToRtmp(yuvFrame, codecContext, avFrame, avPacket, formatContext);
    }
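    // Flush the encoder before writing the trailer: sending a null frame puts it into
    // draining mode so any frames still buffered inside the encoder are emitted.
    avcodec_send_frame(codecContext, nullptr);
    while (avcodec_receive_packet(codecContext, avPacket) == 0) {
        avPacket->stream_index = 0;
        av_packet_rescale_ts(avPacket, codecContext->time_base, videoStream->time_base);
        av_interleaved_write_frame(formatContext, avPacket);
        av_packet_unref(avPacket);
    }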
    // Write the stream trailer
    av_write_trailer(formatContext);
    // Release resources
    av_frame_free(&avFrame);
    av_packet_free(&avPacket);
    avcodec_free_context(&codecContext);
    if (!(formatContext->oformat->flags & AVFMT_NOFILE))
        avio_closep(&formatContext->pb);
    avformat_free_context(formatContext);
    return 0;
}
In this example, OpenCV opens the input video, and each cv::Mat frame is encoded with FFmpeg and pushed to the RTMP server. Be sure to replace the input video path and the RTMP server URL with values that match your setup.
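Since the original follow-me robot streams from a camera rather than a file, the capture source can be switched to a live device by changing how cap is constructed (the device index 0 and the requested resolution below are assumptions; adjust them to your hardware):

cv::VideoCapture cap(0);                   // open the default camera instead of a file
cap.set(cv::CAP_PROP_FRAME_WIDTH, 1280);   // optionally request a capture resolution
cap.set(cv::CAP_PROP_FRAME_HEIGHT, 720);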
Note: in a real deployment, more thorough error handling and resource management are recommended.
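As a minimal sketch of what that could look like (the helper below is illustrative, not part of the FFmpeg API), the return codes the example ignores can be routed through a small checker that turns them into readable messages:

#include <stdexcept>
#include <string>
extern "C" {
#include <libavutil/error.h>
}

// Illustrative helper: turn a negative FFmpeg return code into an exception
// carrying the human-readable error string from av_strerror().
static int ffCheck(int ret, const std::string& what) {
    if (ret < 0) {
        char buf[AV_ERROR_MAX_STRING_SIZE] = {0};
        av_strerror(ret, buf, sizeof(buf));
        throw std::runtime_error(what + " failed: " + buf);
    }
    return ret;
}

// Example usage: ffCheck(avcodec_open2(codecContext, videoCodec, nullptr), "avcodec_open2");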