1. FFmpeg logging and directory operations
1.1 Logging
Log levels, from most to least severe: ERROR > WARNING > INFO > DEBUG
Using the log API:
#include <stdio.h>
#include <libavutil/log.h> // logging API
int main(int argc, char* argv[])
{
av_log_set_level(AV_LOG_DEBUG); // set the log level
av_log(NULL, AV_LOG_INFO, "hello world %s\n", "wsm"); // printf-style; a message is printed only when its level is at least as severe as the configured one
return 0;
}
Compile:
gcc -g -o ffmpeg_log ffmpeg_log.c -lavutil
# you can also specify the header and library search paths explicitly
gcc -g -o ff_log ffmpeg_log.c -I/usr/local/ffmpeg/include -L/usr/local/ffmpeg/lib -lavutil
# or use pkg-config
gcc -g -o ffmpeg_log ffmpeg_log.c `pkg-config --cflags --libs libavutil`
(If pkg-config cannot find libavutil, locate the .pc file and add its directory to PKG_CONFIG_PATH:
find / -name "libavutil.pc" 2>/dev/null
# then open ~/.bashrc and add:
export PKG_CONFIG_PATH=/usr/local/lib/pkgconfig:$PKG_CONFIG_PATH
)
1.2 Files
Delete a file: avpriv_io_delete()
Rename/move a file: avpriv_io_move()
My FFmpeg version did not match the tutorial's while experimenting; see: https://cloud.tencent.com/developer/article/1880282
// ffmpeg_delete.c
#include <libavformat/avformat.h>
int main()
{
int ret;
ret = avpriv_io_delete("test.txt");
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Fail\n");
return -1;
}
return 0;
}
Compile:
gcc -g -o ffmpeg_delete ffmpeg_delete.c `pkg-config --libs libavformat libavutil`
Renaming a file:
avpriv_io_move("111.txt", "222.txt"); // rename 111.txt to 222.txt
1.3 Directories
Open a directory: avio_open_dir()
Read the next entry: avio_read_dir()
Close the directory: avio_close_dir()
Context struct for directory operations: AVIODirContext
Directory entry struct holding the file name, size, and other info: AVIODirEntry
[Implementing a simple ls command]
#include <libavutil/log.h>
#include <libavformat/avformat.h>
int main(int argc, char *argv[])
{
int ret;
AVIODirContext *ctx = NULL;
AVIODirEntry *entry = NULL;
av_log_set_level(AV_LOG_INFO);
ret = avio_open_dir(&ctx, "./", NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cant open Dir:%s\n", av_err2str(ret));
goto __fail;
}
while(1)
{
ret = avio_read_dir(ctx, &entry);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "Cant read dir:%s\n", av_err2str(ret));
goto __fail;
}
if (!entry) // reached the end of the directory
{
break;
}
av_log(NULL, AV_LOG_INFO, "%12"PRId64" %s\n", entry->size, entry->name); // 也可以写成 %12ld
avio_free_directory_entry(&entry);
}
__fail:
avio_close_dir(&ctx);
return 0;
}
Compile:
gcc -g -o list ffmpeg_list.c `pkg-config --libs libavformat libavutil`
2. FFmpeg basic concepts and common structs
A multimedia file is really a container, and inside the container there are several streams (their data does not overlap).
Each stream is encoded by a different encoder; the data read out of a stream is called a packet, and one packet contains one or more frames.
Format context struct: AVFormatContext
Stream: AVStream
Packet: AVPacket
Basic steps for working with stream data in FFmpeg: open the input, find the stream(s) you need, read packets in a loop, and release everything at the end.
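A minimal sketch of that basic flow, assuming a local input file named test.mp4 (the name is only an illustration); the programs below all follow this pattern:
// basic_flow.c -- minimal sketch of the open / read-packets / close pattern
#include <libavformat/avformat.h>
int main(void)
{
    AVFormatContext *fmtCtx = NULL;
    AVPacket pkt;
    // open the container
    if (avformat_open_input(&fmtCtx, "test.mp4", NULL, NULL) < 0)
        return -1;
    // read packets from every stream until end of file
    while (av_read_frame(fmtCtx, &pkt) >= 0) {
        av_log(NULL, AV_LOG_INFO, "stream %d, packet size %d\n", pkt.stream_index, pkt.size);
        av_packet_unref(&pkt); // release each packet after use
    }
    // close the container
    avformat_close_input(&fmtCtx);
    return 0;
}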
3. Hands-on muxing/demuxing and stream operations
3.1 Extracting the audio data
I typed the output extension as acc instead of aac, and the program kept crashing with a segmentation fault; it took me forever to figure out 🤣
The workflow is documented in the code comments.
#include <stdio.h>
#include <libavutil/log.h>
#include <libavutil/avutil.h>
#include <libavformat/avformat.h>
int main(int argc, char *argv[]){
int ret = -1;
int idx = -1;
//1. parse the arguments
char* src;
char* dst;
AVFormatContext *pFmtCtx = NULL;
AVFormatContext *oFmtCtx = NULL;
const AVOutputFormat *outFmt = NULL;
AVStream *outStream = NULL;
AVStream *inStream = NULL;
AVPacket pkt;
av_log_set_level(AV_LOG_DEBUG);
if(argc < 3){ //argv[0] is the program name
av_log(NULL, AV_LOG_INFO, "there must be at least 3 arguments!\n");
exit(-1);
}
src = argv[1];
dst = argv[2];
//2. open the input media file
if((ret = avformat_open_input(&pFmtCtx, src, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "%s\n", av_err2str(ret));
exit(-1);
}
//3. find the audio stream in the input file
idx = av_find_best_stream(pFmtCtx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
if(idx < 0) {
av_log(pFmtCtx, AV_LOG_ERROR, "Does not include audio stream!\n");
goto _ERROR;
}
//4. create the output format context
oFmtCtx = avformat_alloc_context();
if(!oFmtCtx){
av_log(NULL, AV_LOG_ERROR, "NO Memory!\n");
goto _ERROR;
}
outFmt = av_guess_format(NULL, dst, NULL);
oFmtCtx->oformat = outFmt;
//5. create a new audio stream for the output file
outStream = avformat_new_stream(oFmtCtx, NULL);
//6. copy the audio stream parameters to the output
inStream = pFmtCtx->streams[idx];
avcodec_parameters_copy(outStream->codecpar, inStream->codecpar);
outStream->codecpar->codec_tag = 0;
//bind the output file to the context (open the output AVIOContext)
ret = avio_open2(&oFmtCtx->pb, dst, AVIO_FLAG_WRITE, NULL, NULL);
if(ret < 0 ){
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//7. write the file header to the output
ret = avformat_write_header(oFmtCtx, NULL);
if(ret < 0 ){
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//8. copy audio packets from the source file to the output file
while(av_read_frame(pFmtCtx, &pkt) >= 0) {
if(pkt.stream_index == idx) {
pkt.pts = av_rescale_q_rnd(pkt.pts, inStream->time_base, outStream->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = pkt.pts;
pkt.duration = av_rescale_q(pkt.duration, inStream->time_base, outStream->time_base);
pkt.stream_index = 0;
pkt.pos = -1;
av_interleaved_write_frame(oFmtCtx, &pkt);
av_packet_unref(&pkt);
} else {
av_packet_unref(&pkt); // drop packets from other streams so they are not leaked
}
}
//9. write the file trailer
av_write_trailer(oFmtCtx);
//10. release the allocated resources
_ERROR:
if(pFmtCtx){
avformat_close_input(&pFmtCtx);
pFmtCtx = NULL;
}
if(oFmtCtx && oFmtCtx->pb){ // guard: oFmtCtx may still be NULL if we jumped here early
avio_close(oFmtCtx->pb);
}
if(oFmtCtx){
avformat_free_context(oFmtCtx);
oFmtCtx = NULL;
}
printf("hello, world!\n");
return 0;
}
Run:
./extra_audio 1.mp4 1.aac
3.2 Extracting the video data
Two differences from the code above:
1. The dts is rescaled from the input stream's value (for audio we simply set dts = pts).
2. idx now locates the video stream: idx = av_find_best_stream(pFmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
Full code:
#include <stdio.h>
#include <libavutil/log.h>
#include <libavutil/avutil.h>
#include <libavformat/avformat.h>
int main(int argc, char *argv[]){
int ret = -1;
int idx = -1;
//1. parse the arguments
char* src;
char* dst;
AVFormatContext *pFmtCtx = NULL;
AVFormatContext *oFmtCtx = NULL;
const AVOutputFormat *outFmt = NULL;
AVStream *outStream = NULL;
AVStream *inStream = NULL;
AVPacket pkt;
av_log_set_level(AV_LOG_DEBUG);
if(argc < 3){ //argv[0] is the program name
av_log(NULL, AV_LOG_INFO, "there must be at least 3 arguments!\n");
exit(-1);
}
src = argv[1];
dst = argv[2];
//2. open the input media file
if((ret = avformat_open_input(&pFmtCtx, src, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "%s\n", av_err2str(ret));
exit(-1);
}
//3. find the video stream in the input file
idx = av_find_best_stream(pFmtCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if(idx < 0) {
av_log(pFmtCtx, AV_LOG_ERROR, "Does not include audio stream!\n");
goto _ERROR;
}
//4. create the output format context
oFmtCtx = avformat_alloc_context();
if(!oFmtCtx){
av_log(NULL, AV_LOG_ERROR, "NO Memory!\n");
goto _ERROR;
}
outFmt = av_guess_format(NULL, dst, NULL);
oFmtCtx->oformat = outFmt;
//5. create a new video stream for the output file
outStream = avformat_new_stream(oFmtCtx, NULL);
//6. copy the video stream parameters to the output
inStream = pFmtCtx->streams[idx];
avcodec_parameters_copy(outStream->codecpar, inStream->codecpar);
outStream->codecpar->codec_tag = 0;
//bind the output file to the context (open the output AVIOContext)
ret = avio_open2(&oFmtCtx->pb, dst, AVIO_FLAG_WRITE, NULL, NULL);
if(ret < 0 ){
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//7. write the file header to the output
ret = avformat_write_header(oFmtCtx, NULL);
if(ret < 0 ){
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//8. copy video packets from the source file to the output file
while(av_read_frame(pFmtCtx, &pkt) >= 0) {
if(pkt.stream_index == idx) {
pkt.pts = av_rescale_q_rnd(pkt.pts, inStream->time_base, outStream->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, inStream->time_base, outStream->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, inStream->time_base, outStream->time_base);
pkt.stream_index = 0;
pkt.pos = -1;
av_interleaved_write_frame(oFmtCtx, &pkt);
av_packet_unref(&pkt);
} else {
av_packet_unref(&pkt); // drop packets from other streams so they are not leaked
}
}
//9. write the file trailer
av_write_trailer(oFmtCtx);
//10. release the allocated resources
_ERROR:
if(pFmtCtx){
avformat_close_input(&pFmtCtx);
pFmtCtx = NULL;
}
if(oFmtCtx && oFmtCtx->pb){
avio_close(oFmtCtx->pb);
}
if(oFmtCtx){
avformat_free_context(oFmtCtx);
oFmtCtx = NULL;
}
printf("hello, world!\n");
return 0;
}
Run:
./extra_video 1.mp4 1.hevc
The source is old.mov with a frame rate of 60 fps:
Stream #0:0[0x1]: Video: h264 (High) (avc1 / 0x31637661), yuvj420p(pc, bt470bg/unknown/unknown, progressive), 1280x720 [SAR 1:1 DAR 16:9], 1724 kb/s, 60 fps, 60 tbr, 15360 tbn (default)
Extracted to a raw .h264 file it reports only 25 fps, because this format does not carry frame-rate information (25 fps is just FFmpeg's default assumption for raw streams):
Stream #0:0: Video: h264 (High), yuvj420p(pc, bt470bg/unknown/unknown, progressive), 1280x720 [SAR 1:1 DAR 16:9], 25 fps, 120 tbr, 1200k tbn
With a container format such as MP4 it is still 60 fps, because the frame rate can be computed from the timestamps:
Stream #0:0[0x1](und): Video: h264 (High) (avc1 / 0x31637661), yuvj420p(pc, bt470bg/unknown/unknown, progressive), 1280x720 [SAR 1:1 DAR 16:9], 1724 kb/s, 60 fps, 60 tbr, 90k tbn (default)
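As a worked example (illustrative numbers, not taken from the output above): with a time base of 1/90000 (90k tbn), if consecutive video packets have pts values 1500 ticks apart, the frame rate is 1 / (1500 × 1/90000) = 90000 / 1500 = 60 fps.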
3.3 Remuxing a multimedia file
Copy all audio, video, and subtitle streams: first mark every stream that qualifies, then copy their packets over.
Code:
#include <stdio.h>
#include <libavutil/log.h>
#include <libavutil/avutil.h>
#include <libavformat/avformat.h>
int main(int argc, char *argv[]){
int ret = -1;
int idx = -1;
int stream_idx = 0;
int i = 0;
//1. parse the arguments
char* src;
char* dst;
int *stream_map = NULL;
AVFormatContext *pFmtCtx = NULL;
AVFormatContext *oFmtCtx = NULL;
const AVOutputFormat *outFmt = NULL;
AVPacket pkt;
av_log_set_level(AV_LOG_DEBUG);
if(argc < 3){ //argv[0] is the program name
av_log(NULL, AV_LOG_INFO, "there must be at least 3 arguments!\n");
exit(-1);
}
src = argv[1];
dst = argv[2];
//2. open the input media file
if((ret = avformat_open_input(&pFmtCtx, src, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "%s\n", av_err2str(ret));
exit(-1);
}
//4. create the output format context
avformat_alloc_output_context2(&oFmtCtx, NULL, NULL, dst);
if(!oFmtCtx){
av_log(NULL, AV_LOG_ERROR, "NO MEMORY!\n");
goto _ERROR;
}
stream_map = av_calloc(pFmtCtx->nb_streams, sizeof(int));
if(!stream_map){
av_log(NULL, AV_LOG_ERROR, "NO MEMORY!\n");
goto _ERROR;
}
for(i=0; i < pFmtCtx->nb_streams; i++){
AVStream *outStream = NULL;
AVStream *inStream = pFmtCtx->streams[i];
AVCodecParameters *inCodecPar = inStream->codecpar;
if(inCodecPar->codec_type != AVMEDIA_TYPE_AUDIO &&
inCodecPar->codec_type != AVMEDIA_TYPE_VIDEO &&
inCodecPar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
stream_map[i] = -1;
continue;
}
stream_map[i] = stream_idx++;
//5. create a new stream in the output for this input stream
outStream = avformat_new_stream(oFmtCtx, NULL);
if(!outStream){
av_log(oFmtCtx, AV_LOG_ERROR, "NO MEMORY!\n");
goto _ERROR;
}
avcodec_parameters_copy(outStream->codecpar, inStream->codecpar);
outStream->codecpar->codec_tag = 0;
}
//bind the output file to the context (open the output AVIOContext)
ret = avio_open2(&oFmtCtx->pb, dst, AVIO_FLAG_WRITE, NULL, NULL);
if(ret < 0 ){
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//7. write the file header to the output
ret = avformat_write_header(oFmtCtx, NULL);
if(ret < 0 ){
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//8. copy audio/video/subtitle packets from the source file to the output file
while(av_read_frame(pFmtCtx, &pkt) >= 0) {
AVStream *inStream, *outStream;
inStream = pFmtCtx->streams[pkt.stream_index];
if(stream_map[pkt.stream_index] < 0){
av_packet_unref(&pkt);
continue;
}
pkt.stream_index = stream_map[pkt.stream_index];
outStream = oFmtCtx->streams[pkt.stream_index];
av_packet_rescale_ts(&pkt, inStream->time_base, outStream->time_base);
pkt.pos = -1;
av_interleaved_write_frame(oFmtCtx, &pkt);
av_packet_unref(&pkt);
}
//9. write the file trailer
av_write_trailer(oFmtCtx);
//10. release the allocated resources
_ERROR:
if(pFmtCtx){
avformat_close_input(&pFmtCtx);
pFmtCtx = NULL;
}
if(oFmtCtx && oFmtCtx->pb){
avio_close(oFmtCtx->pb);
}
if(oFmtCtx){
avformat_free_context(oFmtCtx);
oFmtCtx = NULL;
}
if(stream_map){
av_free(stream_map);
}
printf("hello, world!\n");
return 0;
}
Original file:
Stream #0:0[0x1](und): Video: h264 (High) (avc1 / 0x31637661), yuv420p(progressive), 1280x720 [SAR 1:1 DAR 16:9], 575 kb/s, 25 fps, 25 tbr, 12800 tbn (default)
Metadata:
handler_name : VideoHandler
vendor_id : [0][0][0][0]
encoder : Lavc60.23.100 libx264
Stream #0:1[0x2](eng): Audio: aac (LC) (mp4a / 0x6134706D), 48000 Hz, stereo, fltp, 128 kb/s (default)
Metadata:
handler_name : Libquicktime Sound Media Handler
vendor_id : [0][0][0][0]
After remuxing:
Stream #0:0[0x1](eng): Video: h264 (High) (avc1 / 0x31637661), yuv420p(progressive), 1280x720 [SAR 1:1 DAR 16:9], 575 kb/s, 25 fps, 25 tbr, 90k tbn (default)
Metadata:
handler_name : VideoHandler
vendor_id : FFMP
Stream #0:1[0x2](eng): Audio: aac (LC) (mp4a / 0x6134706D), 48000 Hz, stereo, fltp, 128 kb/s (default)
Metadata:
handler_name : SoundHandler
vendor_id : [0][0][0][0]
3.4 Cutting a video
The timestamps need some handling: a stream contains many frames, and the first timestamp of each stream has to be treated as 0.
#include <stdio.h>
#include <stdlib.h>
#include <libavutil/log.h>
#include <libavutil/timestamp.h>
#include <libavutil/avutil.h>
#include <libavformat/avformat.h>
static void log_packet(AVFormatContext *fmtCtx, const AVPacket *pkt, int64_t pts_start, int64_t dts_start){
// = &fmtCtx->streams[pkt->stream_index]->time_base;
av_log(fmtCtx,
AV_LOG_INFO,
"pts:%s dts:%s pts_diff:%lld dts_diff:%lld stream_idx:%d pts_start:%lld dts_start:%lld\n",
av_ts2str(pkt->pts),
av_ts2str(pkt->dts),
pkt->pts - pts_start,
pkt->dts - dts_start,
pkt->stream_index,
pts_start,
dts_start);
}
int main(int argc, char *argv[]){
int ret = -1;
int idx = -1;
int stream_idx = 0;
int i = 0;
//1. parse the arguments
char* src;
char* dst;
double starttime = 0;
double endtime = 0;
int *stream_map = NULL;
int64_t *dts_start_time = NULL;
int64_t *pts_start_time = NULL;
AVFormatContext *pFmtCtx = NULL;
AVFormatContext *oFmtCtx = NULL;
const AVOutputFormat *outFmt = NULL;
AVPacket pkt;
av_log_set_level(AV_LOG_DEBUG);
//cut src dst start end
if(argc < 5){ //argv[0] is the program name
av_log(NULL, AV_LOG_INFO, "there must be at least 5 arguments!\n");
exit(-1);
}
src = argv[1];
dst = argv[2];
starttime = atof(argv[3]);
endtime = atof(argv[4]);
//2. open the input media file
if((ret = avformat_open_input(&pFmtCtx, src, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "%s\n", av_err2str(ret));
exit(-1);
}
//4. create the output format context
avformat_alloc_output_context2(&oFmtCtx, NULL, NULL, dst);
if(!oFmtCtx){
av_log(NULL, AV_LOG_ERROR, "NO MEMORY!\n");
goto _ERROR;
}
stream_map = av_calloc(pFmtCtx->nb_streams, sizeof(int));
if(!stream_map){
av_log(NULL, AV_LOG_ERROR, "NO MEMORY!\n");
goto _ERROR;
}
for(i=0; i < pFmtCtx->nb_streams; i++){
AVStream *outStream = NULL;
AVStream *inStream = pFmtCtx->streams[i];
AVCodecParameters *inCodecPar = inStream->codecpar;
if(inCodecPar->codec_type != AVMEDIA_TYPE_AUDIO &&
inCodecPar->codec_type != AVMEDIA_TYPE_VIDEO &&
inCodecPar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
stream_map[i] = -1;
continue;
}
stream_map[i] = stream_idx++;
//5. create a new stream in the output for this input stream
outStream = avformat_new_stream(oFmtCtx, NULL);
if(!outStream){
av_log(oFmtCtx, AV_LOG_ERROR, "NO MEMORY!\n");
goto _ERROR;
}
avcodec_parameters_copy(outStream->codecpar, inStream->codecpar);
outStream->codecpar->codec_tag = 0;
}
//bind the output file to the context (open the output AVIOContext)
ret = avio_open2(&oFmtCtx->pb, dst, AVIO_FLAG_WRITE, NULL, NULL);
if(ret < 0 ){
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//7. write the file header to the output
ret = avformat_write_header(oFmtCtx, NULL);
if(ret < 0 ){
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//seek
ret = av_seek_frame(pFmtCtx, -1, starttime*AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
if(ret < 0) {
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
dts_start_time = av_calloc(pFmtCtx->nb_streams, sizeof(int64_t));
for(int t=0; t < pFmtCtx->nb_streams; t++){
dts_start_time[t] = -1;
}
pts_start_time = av_calloc(pFmtCtx->nb_streams, sizeof(int64_t));
for(int t=0; t < pFmtCtx->nb_streams; t++){
pts_start_time[t] = -1;
}
//8. copy audio/video/subtitle packets from the source file to the output file
while(av_read_frame(pFmtCtx, &pkt) >= 0) {
AVStream *inStream, *outStream;
if(dts_start_time[pkt.stream_index] == -1 && pkt.dts >= 0){ // record the first dts of this stream
dts_start_time[pkt.stream_index] = pkt.dts;
}
if(pts_start_time[pkt.stream_index] == -1 && pkt.pts >= 0){
pts_start_time[pkt.stream_index] = pkt.pts;
}
inStream = pFmtCtx->streams[pkt.stream_index];
if(av_q2d(inStream->time_base) * pkt.pts > endtime) {
av_log(oFmtCtx, AV_LOG_INFO, "success!\n");
break;
}
if(stream_map[pkt.stream_index] < 0){
av_packet_unref(&pkt);
continue;
}
//printf("pkt.pts=%lld, pkt.dts=%lld\n", pkt.pts, pkt.dts);
log_packet(pFmtCtx, &pkt, pts_start_time[pkt.stream_index], dts_start_time[pkt.stream_index]);
pkt.pts = pkt.pts - pts_start_time[pkt.stream_index];
pkt.dts = pkt.dts - dts_start_time[pkt.stream_index];
if(pkt.dts > pkt.pts){
pkt.pts = pkt.dts;
}
pkt.stream_index = stream_map[pkt.stream_index];
outStream = oFmtCtx->streams[pkt.stream_index];
av_packet_rescale_ts(&pkt, inStream->time_base, outStream->time_base);
pkt.pos = -1;
av_interleaved_write_frame(oFmtCtx, &pkt);
av_packet_unref(&pkt);
}
//9. write the file trailer
av_write_trailer(oFmtCtx);
//10. release the allocated resources
_ERROR:
if(pFmtCtx){
avformat_close_input(&pFmtCtx);
pFmtCtx = NULL;
}
if(oFmtCtx && oFmtCtx->pb){
avio_close(oFmtCtx->pb);
}
if(oFmtCtx){
avformat_free_context(oFmtCtx);
oFmtCtx = NULL;
}
if(stream_map){
av_free(stream_map);
}
if(dts_start_time){
av_free(dts_start_time);
}
if(pts_start_time){
av_free(pts_start_time);
}
printf("hello, world!\n");
return 0;
}
4. Implementing a bare-bones version of Xiaokaxiu (小咖秀)
Run it like this:
./avmerge 1.mp4 2.mp4 3.mp4 5 15
Merge the video from 1.mp4 with the audio from 2.mp4, cut out the 5-15 second range, and write the result to 3.mp4.
The whole job breaks down into: extract, merge, cut.
Writing it myself I got stuck on the merge step; it is really just the final interleaving part of the remuxing example!
#include <stdio.h>
#include <stdlib.h>
#include <libavutil/log.h>
#include <libavutil/timestamp.h>
#include <libavutil/avutil.h>
#include <libavformat/avformat.h>
// extract, merge, and cut
// ./avmerge 1.mp4 2.mp4 3.mp4 5 15
int main(int argc, char *argv[])
{
// 1. parse the arguments
char *src1; // source file 1
char *src2; // source file 2
char *dst; // output file
double starttime = 0; // cut start time (seconds)
double endtime = 0; // cut end time (seconds)
int idx1 = -1; // index of the video stream in source 1
int idx2 = -1; // index of the audio stream in source 2
AVFormatContext *pFmtCtx1 = NULL; // format context of source 1
AVFormatContext *pFmtCtx2 = NULL; // format context of source 2
AVFormatContext *oFmtCtx = NULL;
const AVOutputFormat *outFmt = NULL; // av_guess_format() returns a const pointer in recent FFmpeg versions
AVStream *outStream1 = NULL;
AVStream *outStream2 = NULL;
AVStream *inStream1 = NULL;
AVStream *inStream2 = NULL;
AVPacket pkt;
int64_t *dts_start_time = NULL;
int64_t *pts_start_time = NULL;
int ret = -1;
av_log_set_level(AV_LOG_INFO);
if (argc != 6)
{
av_log(NULL, AV_LOG_INFO, "example: ./avmerge 1.mp4 2.mp4 3.mp4 5 15\n");
exit(-1);
}
src1 = argv[1];
src2 = argv[2];
dst = argv[3];
starttime = atof(argv[4]);
endtime = atof(argv[5]);
// 2. open both input files
ret = avformat_open_input(&pFmtCtx1, src1, NULL, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "open file 1 error %s\n", av_err2str(ret));
goto _ERROR;
}
ret = avformat_open_input(&pFmtCtx2, src2, NULL, NULL);
if (ret < 0)
{
av_log(NULL, AV_LOG_ERROR, "open file 2 error %s\n", av_err2str(ret));
goto _ERROR;
}
// 3. create the output format context
oFmtCtx = avformat_alloc_context();
if(!oFmtCtx){
av_log(NULL, AV_LOG_ERROR, "NO MEMORY!\n");
goto _ERROR;
}
outFmt = av_guess_format(NULL, dst, NULL);
oFmtCtx->oformat = outFmt; // basic information about the output format
// create a video stream from source 1 and an audio stream from source 2, then cut both and merge them (write their packets in turn)
// 4. find the video stream in source 1 and the audio stream in source 2
idx1 = av_find_best_stream(pFmtCtx1, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if(idx1 < 0) {
av_log(pFmtCtx1, AV_LOG_ERROR, "Does not include audio stream!\n");
goto _ERROR;
}
idx2 = av_find_best_stream(pFmtCtx2, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
if(idx2 < 0) {
av_log(pFmtCtx2, AV_LOG_ERROR, "Does not include audio stream!\n");
goto _ERROR;
}
// 5. create one video stream and one audio stream in the output file
outStream1 = avformat_new_stream(oFmtCtx, NULL);
outStream2 = avformat_new_stream(oFmtCtx, NULL);
// 6. copy the video and audio stream parameters
inStream1 = pFmtCtx1->streams[idx1];
avcodec_parameters_copy(outStream1->codecpar, inStream1->codecpar);
outStream1->codecpar->codec_tag = 0;
inStream2 = pFmtCtx2->streams[idx2];
avcodec_parameters_copy(outStream2->codecpar, inStream2->codecpar);
outStream2->codecpar->codec_tag = 0;
// bind the output file to the context (open the output AVIOContext)
ret = avio_open2(&oFmtCtx->pb, dst, AVIO_FLAG_WRITE, NULL, NULL);
if (ret < 0)
{
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
// 7. write the file header to the output
ret = avformat_write_header(oFmtCtx, NULL);
if (ret < 0)
{
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
//seek
ret = av_seek_frame(pFmtCtx1, -1, starttime*AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
if(ret < 0) {
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
ret = av_seek_frame(pFmtCtx2, -1, starttime*AV_TIME_BASE, AVSEEK_FLAG_BACKWARD);
if(ret < 0) {
av_log(oFmtCtx, AV_LOG_ERROR, "%s", av_err2str(ret));
goto _ERROR;
}
// 8. copy the video and audio packets into the output file, cutting to the requested range
int64_t dts_start_time1 = -1; // int64_t to match pkt.dts/pkt.pts
int64_t pts_start_time1 = -1;
int64_t dts_start_time2 = -1;
int64_t pts_start_time2 = -1;
while (av_read_frame(pFmtCtx1, &pkt) >= 0)
{
AVStream *inStream, *outStream;
if (pkt.stream_index == idx1)
{
if (dts_start_time1 == -1 && pkt.dts >= 0) dts_start_time1 = pkt.dts;
if (pts_start_time1 == -1 && pkt.pts >= 0) pts_start_time1 = pkt.pts;
pkt.stream_index = 0;
inStream = pFmtCtx1->streams[idx1];
if(av_q2d(inStream->time_base) * pkt.pts > endtime) {
av_log(oFmtCtx, AV_LOG_INFO, "success!\n");
break;
}
pkt.pts = pkt.pts - pts_start_time1;
pkt.dts = pkt.dts - dts_start_time1;
if (pkt.dts > pkt.pts)
{
pkt.pts = pkt.dts;
}
outStream = oFmtCtx->streams[pkt.stream_index];
av_packet_rescale_ts(&pkt, inStream->time_base, outStream->time_base);
pkt.pos = -1;
av_interleaved_write_frame(oFmtCtx, &pkt);
av_packet_unref(&pkt);
}
else
{
av_packet_unref(&pkt); // drop non-video packets so they are not leaked
}
}
while (av_read_frame(pFmtCtx2, &pkt) >= 0)
{
AVStream *inStream, *outStream;
if (pkt.stream_index == idx2)
{
if (dts_start_time2 == -1 && pkt.dts >= 0) dts_start_time2 = pkt.dts;
if (pts_start_time2 == -1 && pkt.pts >= 0) pts_start_time2 = pkt.pts;
pkt.stream_index = 1;
inStream = pFmtCtx2->streams[idx2];
if(av_q2d(inStream->time_base) * pkt.pts > endtime) {
av_log(oFmtCtx, AV_LOG_INFO, "success!\n");
break;
}
pkt.pts = pkt.pts - pts_start_time2;
pkt.dts = pkt.dts - dts_start_time2;
if (pkt.dts > pkt.pts)
{
pkt.pts = pkt.dts;
}
outStream = oFmtCtx->streams[pkt.stream_index];
av_packet_rescale_ts(&pkt, inStream->time_base, outStream->time_base);
pkt.pos = -1;
av_interleaved_write_frame(oFmtCtx, &pkt);
av_packet_unref(&pkt);
}
else
{
av_packet_unref(&pkt); // drop non-audio packets so they are not leaked
}
}
//9. write the file trailer
av_write_trailer(oFmtCtx);
//11. release the allocated resources
_ERROR:
if(pFmtCtx1){
avformat_close_input(&pFmtCtx1);
pFmtCtx1= NULL;
}
if(pFmtCtx2){
avformat_close_input(&pFmtCtx2);
pFmtCtx2= NULL;
}
if(oFmtCtx && oFmtCtx->pb){
avio_close(oFmtCtx->pb);
}
if(oFmtCtx){
avformat_free_context(oFmtCtx);
oFmtCtx = NULL;
}
if(dts_start_time){
av_free(dts_start_time);
}
if(pts_start_time){
av_free(pts_start_time);
}
return 0;
}
I recorded a demo video of it, hehe.