0%

ffmpeg的c语言实现对视频的切片


概述

之前使用ffmpeg的c语言实现过对视频的截图,这里研究一下切片(也就是录制)的实现方法。


实现思路

其实实现很简单,思路几乎和截图的实现一样:

  1. 读取原始视频文件(或直播流)。
  2. 解封装。
  3. 可选:解码后转码。
  4. 写入输出文件(当到达切片时长时,写入新的输出文件即可)。

关键就在第四步:在达到想要的切片时长时,关闭当前输出并写入新的输出文件即可;其它步骤就是基本的解封装、再封装(转封装)流程,本文的实现并不涉及实际的解码和重新编码。


代码

segment.c

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
// 参考 https://www.cnblogs.com/leisure_chn/p/10584901.html
// https://blog.csdn.net/lightfish_zhang/article/details/86594694

#define _CRT_NONSTDC_NO_DEPRECATE
#define _CRT_SECURE_NO_WARNINGS

#include <segment.h>
#include <map.h>
#include <pthread.h>

// Global task-state registry shared between the Go-facing API and the
// worker loops. Guarded by taskMapMute.
// BUG FIX: pthread mutexes must be initialized before first lock; the
// originals were never passed to pthread_mutex_init(), so initialize them
// statically with PTHREAD_MUTEX_INITIALIZER.
pthread_mutex_t taskMapMute = PTHREAD_MUTEX_INITIALIZER;
int mapInit = 0;            // one-shot guard for map_init()
map_int_t taskStateMap;     // taskId -> TASK_STATE

pthread_mutex_t ffLogInitMute = PTHREAD_MUTEX_INITIALIZER;
int ffLogInit = 0;          // one-shot guard for av_log_set_callback()

// Install LogForFF as ffmpeg's log callback exactly once (thread-safe).
void initFFLogCallBack() {
    pthread_mutex_lock(&ffLogInitMute);
    if (ffLogInit == 0) {
        av_log_set_callback(LogForFF);
        ffLogInit = 1;
    }
    pthread_mutex_unlock(&ffLogInitMute);
}

// Initialize the shared task-state map exactly once (thread-safe).
void initTaskStateMap() {
    pthread_mutex_lock(&taskMapMute);
    if (mapInit == 0) {
        map_init(&taskStateMap);
        mapInit = 1;
    }
    pthread_mutex_unlock(&taskMapMute);
}

// One-time global initialization for this module.
void initGlobal() {
    // initFFLogCallBack() would forward ffmpeg's log output to the Go side;
    // deliberately left disabled here.
    initTaskStateMap(); // prepare the shared task-state map
}

// Record the state for taskId; the entry is created if it does not exist yet.
void setTaskState(char* taskId, int state) {
    pthread_mutex_lock(&taskMapMute);
    map_set(&taskStateMap, taskId, state);
    pthread_mutex_unlock(&taskMapMute);
}

// Drop taskId from the state map (no-op if absent).
void delTaskState(char* taskId) {
    pthread_mutex_lock(&taskMapMute);
    map_remove(&taskStateMap, taskId);
    pthread_mutex_unlock(&taskMapMute);
}

// Look up the state slot for taskId.
// Returns a pointer into the map's internal storage, or NULL when unknown.
// NOTE(review): the pointer escapes the lock — if another thread removes or
// rehashes the entry afterwards it may dangle; callers should copy the value
// promptly (isTaskStateRunning does). Verify map_int_t's rehash behavior.
int* getTaskState(char* taskId) {
pthread_mutex_lock(&taskMapMute);
int* val = map_get(&taskStateMap, taskId);
pthread_mutex_unlock(&taskMapMute);
return val;
}

// Returns 1 when taskId exists in the map and its state is RUNNING, else 0.
int isTaskStateRunning(char* taskId) {
    int* state = getTaskState(taskId);
    return (state != NULL) && (*state == RUNNING);
}

// ffmpeg AVIOInterruptCallback: a non-zero return aborts blocking I/O.
// opaque carries the task id; abort as soon as the task leaves RUNNING.
int interruptCallBack(void* taskId) {
    return isTaskStateRunning((char*)taskId) ? 0 : 1;
}

// Ask a running task to stop (invoked from the Go side).
// Unknown task ids are only logged; nothing is created.
void StopTaskForGo(char* taskId) {
    pthread_mutex_lock(&taskMapMute);
    if (map_get(&taskStateMap, taskId) == NULL) {
        LogError("task %s not exists", taskId);
    } else {
        map_set(&taskStateMap, taskId, STOP);
    }
    pthread_mutex_unlock(&taskMapMute);
}

//static void log_packet(const AVFormatContext* fmt_ctx, const AVPacket* pkt, const char* tag)
//{
// AVRational* time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
//
// printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
// tag,
// av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
// av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
// av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
// pkt->stream_index);
//}


// Open the input (file or live stream), probe its streams, and build the
// input->output stream index mapping (audio/video/subtitle kept, rest dropped).
// Also records the video stream index for key-frame based segment rotation.
// Returns 0 on success, a negative AVERROR code on failure.
int openInput(Segment* ss) {
    int ret;

    // Allocate the context up front so the interrupt callback can be attached
    // before avformat_open_input(); it lets us break out of blocking network
    // reads when the task is stopped.
    ss->ifmt_ctx = avformat_alloc_context();
    if (!ss->ifmt_ctx) {
        // BUG FIX: the original dereferenced the result without a NULL check.
        LogError("task %s could not allocate input context", ss->taskId);
        return AVERROR(ENOMEM);
    }

    ss->ifmt_ctx->interrupt_callback.callback = interruptCallBack;
    ss->ifmt_ctx->interrupt_callback.opaque = ss->taskId;

    if ((ret = avformat_open_input(&(ss->ifmt_ctx), ss->inputUrl, 0, 0)) < 0) {
        LogError("task %s could not open input '%s'", ss->taskId, ss->inputUrl);
        return ret;
    }

    if ((ret = avformat_find_stream_info(ss->ifmt_ctx, 0)) < 0) {
        LogError("task %s failed to retrieve input stream information", ss->taskId);
        return ret;
    }

    av_dump_format(ss->ifmt_ctx, 0, ss->inputUrl, 0);

    ss->stream_mapping_size = ss->ifmt_ctx->nb_streams;
    ss->stream_mapping = av_mallocz_array(ss->stream_mapping_size, sizeof(*(ss->stream_mapping)));
    if (!ss->stream_mapping) {
        return AVERROR(ENOMEM);
    }

    // Map the streams we keep to compact output indices; mark others with -1.
    unsigned int i;  // matches the unsigned type of nb_streams
    int stream_index = 0;
    for (i = 0; i < ss->ifmt_ctx->nb_streams; i++) {
        AVStream* in_stream = ss->ifmt_ctx->streams[i];
        AVCodecParameters* in_codecpar = in_stream->codecpar;

        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            ss->stream_mapping[i] = -1;
            continue;
        }

        if (in_codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            ss->video_index = i;
        }

        ss->stream_mapping[i] = stream_index++;
    }

    return 0;
}

// Rotate to a new .ts segment: finalize the previous output (if any), build
// the next file name, create stream-copied output streams, open the file and
// write the container header. Returns 0 on success, negative on failure.
int openOutput(Segment* ss) {
int ret = 0;

ss->tsCount++;

// Close and flush the previous segment first (also fires the Go ts-callback).
freeOutput(ss);

constuctTsFileName(ss);

// Let ffmpeg guess the muxer (mpegts) from the ".ts" file name.
avformat_alloc_output_context2(&(ss->ofmt_ctx), NULL, NULL, ss->nameBuffer);
if (!(ss->ofmt_ctx)) {
LogError("task %s could not create output context", ss->taskId);
ret = AVERROR_UNKNOWN;
return ret;
}

ss->ofmt = ss->ofmt_ctx->oformat;

int i;
for (i = 0; i < ss->ifmt_ctx->nb_streams; i++) {

// Skip input streams that openInput() decided to drop.
if (ss->stream_mapping[i] == -1) {
continue;
}

AVStream* out_stream;
AVStream* in_stream = ss->ifmt_ctx->streams[i];
AVCodecParameters* in_codecpar = in_stream->codecpar;

out_stream = avformat_new_stream(ss->ofmt_ctx, NULL);
if (!out_stream) {
LogError("task %s failed allocating output stream", ss->taskId);
ret = AVERROR_UNKNOWN;
return ret;
}

// Stream copy: no transcoding, just duplicate the codec parameters.
ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
if (ret < 0) {
LogError("task %s failed to copy codec parameters", ss->taskId);
return ret;
}
out_stream->codecpar->codec_tag = 0; // let the muxer pick a suitable tag
}

//av_dump_format(ss->ofmt_ctx, 0, ss->nameBuffer, 1);

// Open the actual file unless the muxer needs no file (AVFMT_NOFILE).
if (!(ss->ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&(ss->ofmt_ctx->pb), ss->nameBuffer, AVIO_FLAG_WRITE);
if (ret < 0) {
LogError("task %s could not open output file '%s'", ss->taskId, ss->nameBuffer);
return ret;
}
}

ret = avformat_write_header(ss->ofmt_ctx, NULL);
if (ret < 0) {
LogError("task %s error occurred when write header to output file '%s'", ss->taskId, ss->nameBuffer);
return ret;
}


return 0;
}


// Release all input-side resources: the reusable packet, the demuxer context
// and the stream mapping table. Safe to call when fields are already NULL.
void freeInput(Segment* ss) {
    av_packet_free(&(ss->pkt));

    // BUG FIX: contexts opened with avformat_open_input() must be released
    // with avformat_close_input(); avformat_free_context() alone leaks the
    // demuxer's private data and the underlying AVIOContext.
    avformat_close_input(&(ss->ifmt_ctx));

    // BUG FIX: the mapping table allocated in openInput() was never freed.
    av_freep(&(ss->stream_mapping));
    ss->stream_mapping_size = 0;
}

// Finalize and release the current output segment, if one is open: write the
// trailer, close the file, free the muxer context, and report the finished
// .ts file (name + time span) back to the Go side. No-op when no output
// segment is currently open.
void freeOutput(Segment* ss) {
    if (ss->ofmt_ctx == NULL) {
        return;
    }

    av_write_trailer(ss->ofmt_ctx);

    // BUG FIX: AVFMT_NOFILE is an AVOutputFormat flag. The original tested
    // ofmt_ctx->flags (which holds AVFMT_FLAG_* values), so the check never
    // matched the intended bit.
    if (ss->ofmt_ctx->oformat && !(ss->ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&(ss->ofmt_ctx->pb));
    }

    avformat_free_context(ss->ofmt_ctx);
    ss->ofmt_ctx = NULL;

    // Hand the finished segment over to the Go program.
    GoTsCallBackForC(ss->taskId, ss->nameBuffer, ss->tsBeginTime, ss->tsLastTime);
}


// Build the output segment path "<outputFolder>/<n>.ts" into ss->nameBuffer
// (e.g. 0.ts, 1.ts, ...) and advance the wrap counter.
// BUG FIX: with tsWrapLimit == -1 ("no wrapping") the original computed
// (n + 1) % -1 == 0, so every segment was named 0.ts and overwrote itself;
// non-positive limits now keep incrementing without wrapping.
// snprintf replaces the unbounded strcat chain (outputFolder is caller data
// and could overflow the 128-byte buffer; truncation is at least safe).
void constuctTsFileName(Segment* ss) {
    snprintf(ss->nameBuffer, sizeof(ss->nameBuffer), "%s/%d.ts",
             ss->outputFolder, ss->tsWrapCount);

    if (ss->tsWrapLimit > 0) {
        ss->tsWrapCount = (ss->tsWrapCount + 1) % ss->tsWrapLimit;
    } else {
        ss->tsWrapCount++;
    }
}

Segment* initSegmentStruct(char* taskId, char* inputUrl, char* outputFolder, int tsTimeInterval, int tsWrapLimit, int snapTimeInterval, int snapWrapLimit) {

Segment* ss = (Segment*)malloc(sizeof(Segment));

strcpy(ss->taskId, taskId);
strcpy(ss->inputUrl, inputUrl);
strcpy(ss->outputFolder, outputFolder);

ss->tsTimeInterval = tsTimeInterval;
ss->tsWrapLimit = tsWrapLimit;

ss->tsBeginTime = -1;
ss->tsLastTime = -1;

ss->tsCount = 0;
ss->tsWrapCount = 0;

ss->snapTimeInterval = snapTimeInterval;
ss->snapWrapLimit = snapWrapLimit;

ss->snapCount = 0;
ss->snapWrapCount = 0;

ss->ifmt_ctx = NULL;
ss->ofmt_ctx = NULL;
ss->ofmt = NULL;
ss->pkt = NULL;

return ss;
}

// Entry point called from the Go layer: run one segmentation (remux) task.
// Reads packets from inputUrl and stream-copies them into rotating .ts files
// under outputFolder, starting a new segment on the first video key frame
// after tsTimeInterval seconds. Blocks until EOF/read error or until
// StopTaskForGo(taskId) flips the task state. snapTimeInterval/snapWrapLimit
// are stored on the Segment but not used in this function.
// Returns 0 on success, a negative AVERROR code otherwise.
int SegmentStructRun(char* taskId, char* inputUrl, char* outputFolder, int tsTimeInterval, int tsWrapLimit, int snapTimeInterval, int snapWrapLimit) {

// One-time global initialization (task-state map; ffmpeg log hook optional).
initGlobal();

int ret = 0;

Segment* ss = initSegmentStruct(taskId, inputUrl, outputFolder, tsTimeInterval, tsWrapLimit, snapTimeInterval, snapWrapLimit);

// Publish the task state so interruptCallBack() lets blocking I/O proceed.
setTaskState(ss->taskId, RUNNING);

LogInfo("------------- task %s begin -------------", ss->taskId);

// Open the input and build the stream mapping.
ret = openInput(ss);
if (ret < 0) {
LogError("task %s openInput error", ss->taskId);
goto end;
}

LogInfo("------------- task %s open input '%s' success -------------", ss->taskId, ss->inputUrl);


/**********************************************************************/
/*************************** allocate packet **************************/
ss->pkt = av_packet_alloc();
if (!ss->pkt) {
LogError("task %s could not allocate pkt", ss->taskId);
ret = AVERROR_UNKNOWN;
goto end;
}

/**********************************************************************/
/*************************** ts segment loop **************************/
int isFirst = 1;
while (1) {

ret = av_read_frame(ss->ifmt_ctx, ss->pkt);
if (ret < 0) {
// Also reached on clean EOF (AVERROR_EOF) and on interrupt aborts.
LogError("task %s av_read_frame failed, maybe cause by interrupt", ss->taskId);
break;
}

// Stop if the upper (Go) layer has asked this task to end.
if (!isTaskStateRunning(ss->taskId)) {
LogWarn("------------- task %s state is stopped, break the loop -------------", ss->taskId);
break;
}

// Lazily open the first segment on the very first packet.
// NOTE(review): streams[ss->video_index] assumes the input has a video
// stream (video_index is only assigned in openInput for video) — confirm
// behavior for audio-only inputs.
if (isFirst) {
isFirst = 0;

double timeStamp = ss->pkt->pts * av_q2d(ss->ifmt_ctx->streams[ss->video_index]->time_base);
ss->tsBeginTime = timeStamp;
ss->tsLastTime = timeStamp;
ret = openOutput(ss);
if (ret < 0) {
LogError("task %s openOutput error", ss->taskId);
goto end;
}
}

// On a video key frame, check whether it is time to rotate the segment,
// so every segment starts on a key frame and stays independently playable.
if ((ss->pkt->stream_index == ss->video_index) && (ss->pkt->flags & AV_PKT_FLAG_KEY)) {
double timeStamp = ss->pkt->pts * av_q2d(ss->ifmt_ctx->streams[ss->video_index]->time_base);

ss->tsLastTime = timeStamp;
if ((ss->tsLastTime - ss->tsBeginTime) >= ss->tsTimeInterval) {
ret = openOutput(ss);
ss->tsBeginTime = timeStamp;
if (ret < 0) {
LogError("task %s openOutput error", ss->taskId);
goto end;
}
}
}

AVStream* in_stream, * out_stream;

// Drop packets from streams that were not mapped to the output.
in_stream = ss->ifmt_ctx->streams[ss->pkt->stream_index];
if (ss->pkt->stream_index >= ss->stream_mapping_size ||
ss->stream_mapping[ss->pkt->stream_index] < 0) {
av_packet_unref(ss->pkt);
continue;
}

ss->pkt->stream_index = ss->stream_mapping[ss->pkt->stream_index];
out_stream = ss->ofmt_ctx->streams[ss->pkt->stream_index];

// debugging only
//log_packet(ss->ifmt_ctx, ss->pkt, "in");

/* copy packet: rescale timestamps from input to output time base */
ss->pkt->pts = av_rescale_q_rnd(ss->pkt->pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
ss->pkt->dts = av_rescale_q_rnd(ss->pkt->dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
ss->pkt->duration = av_rescale_q(ss->pkt->duration, in_stream->time_base, out_stream->time_base);
ss->pkt->pos = -1; // byte position unknown in the new container

// debugging only
//log_packet(ss->ofmt_ctx, ss->pkt, "out");

ret = av_interleaved_write_frame(ss->ofmt_ctx, ss->pkt);
if (ret < 0) {
LogError("task %s error muxing packet", ss->taskId);
break;
}
av_packet_unref(ss->pkt);
}

end:

if (ret < 0) {
LogError("task %s ret %d error occurred: %s", ss->taskId, ret, av_err2str(ret));
}
else {
LogInfo("task %s exit success", ss->taskId);
}

// Release output first: flushes the trailer and fires the final ts callback.
freeOutput(ss);

freeInput(ss);

delTaskState(ss->taskId);

free(ss);

return ret;
}

segment.h

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
#define _CRT_NONSTDC_NO_DEPRECATE
#define _CRT_SECURE_NO_WARNINGS

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
//#include <libavutil/timestamp.h>
#include <log.h>
#include <callback.h>

// Lifecycle states of a segmentation task, stored in the global task-state
// map keyed by task id (see segment.c).
typedef enum TASK_STATE {
RUNNING,
STOP,
FAULT,
} TASK_STATE;

// Internal task descriptor — not part of the public interface.
typedef struct Segment {

char taskId[128]; // task id, used when calling back into the Go layer

// fixed-size buffers for file naming and input identification
char nameBuffer[128];
char inputUrl[128];
char outputFolder[128];

// ts segment interval and name wrapping
int tsTimeInterval; // seconds between segment rotations; rotation happens on key frames, so the effective lower bound is the GOP length
int tsWrapLimit; // name wrap count, e.g. 5 cycles names 0-4 and overwrites; -1 means no wrapping

double tsBeginTime; // start timestamp (seconds) of the current segment
double tsLastTime; // most recent key-frame timestamp (seconds)

int tsCount; // total number of segments opened so far
int tsWrapCount; // next file-name index (wraps at tsWrapLimit)

// snapshot interval settings (stored but unused by segment.c)
int snapTimeInterval;
int snapWrapLimit;

int snapCount;
int snapWrapCount;

AVOutputFormat* ofmt; // output container format of the current segment
AVFormatContext* ifmt_ctx, * ofmt_ctx; // demuxer context / current muxer context
AVPacket* pkt; // reusable packet for the remux loop

int video_index; // input index of the video stream (key-frame checks)
int* stream_mapping; // input stream index -> output stream index; -1 = dropped
int stream_mapping_size; // number of input streams
} Segment;

// Run a segmentation task to completion (blocking); called from Go.
// Returns 0 on success, a negative AVERROR code on failure.
int SegmentStructRun(char* taskId, char* inputUrl, char* outputFolder, int tsTimeInterval, int tsWrapLimit, int snapTimeInterval, int snapWrapLimit);

// Request a running task to stop; its read loop exits at the next packet.
void StopTaskForGo(char* taskId);

代码大致如上,因为这里是需要给go程序直接调用的,所以是以函数调用形式实现的。