/* Create the AVFormatContext; the output format is guessed from the
 * file extension of filename. */
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
if (!oc) {
    printf("Could not deduce output format from file extension: using MPEG.\n");
    /* The format could not be guessed, fall back to MPEG. */
    avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
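Between creating the context and opening the codecs, the full muxing example adds one AVStream per elementary stream through its add_stream() helper. A condensed sketch of that step, following the stock example; add_stream, have_video/have_audio and encode_video/encode_audio are names from that example and may differ in your code:

/* Add the audio and video streams using the default format codecs
 * and initialize the codecs. */
fmt = oc->oformat;
if (fmt->video_codec != AV_CODEC_ID_NONE) {
    add_stream(&video_st, oc, &video_codec, fmt->video_codec);
    have_video = 1;
    encode_video = 1;
}
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
    add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
    have_audio = 1;
    encode_audio = 1;
}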
/* Now that all the parameters are set, we can open the audio and
 * video codecs and allocate the necessary encode buffers. */
if (have_video)
    open_video(oc, video_codec, &video_st, opt);

if (have_audio)
    open_audio(oc, audio_codec, &audio_st, opt);
/* Dump information about the output context to stderr. */
av_dump_format(oc, 0, filename, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
    /* Open the output file. */
    ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    if (ret < 0) {
        fprintf(stderr, "Could not open '%s': %s\n", filename,
                av_err2str(ret));
        return 1;
    }
}
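Before any packet can be muxed, the container header has to be written; the excerpt skips that call, which in the stock example sits right after avio_open(). A minimal sketch, assuming opt is the same options dictionary passed to open_video()/open_audio():

/* Write the stream header, if any. */
ret = avformat_write_header(oc, &opt);
if (ret < 0) {
    fprintf(stderr, "Error occurred when opening output file: %s\n",
            av_err2str(ret));
    return 1;
}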
static void open_video(AVFormatContext *oc, const AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    /* Copy the entries of opt_arg into opt. */
    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    /* Free the options dictionary. */
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }
    /* allocate and init a re-usable frame */
    /* Allocate a reusable AVFrame with the codec's pixel format, width and height. */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        /* The codec's pix_fmt is not YUV420P: allocate a YUV420P AVFrame
         * and keep it in ost->tmp_frame. */
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
    /* copy the stream parameters to the muxer */
    /* Copy the codec parameters into the stream's codec parameters. */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
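open_video() relies on an alloc_picture() helper that is not shown above; it allocates an AVFrame and its data buffers for the given pixel format and size. A sketch along the lines of the stock example (details may vary between FFmpeg versions):

static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture;
    int ret;

    picture = av_frame_alloc();
    if (!picture)
        return NULL;

    picture->format = pix_fmt;
    picture->width  = width;
    picture->height = height;

    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 0);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate frame data.\n");
        exit(1);
    }

    return picture;
}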
/* Write the trailer, if any. The trailer must be written before you
 * close the CodecContexts open when you wrote the header; otherwise
 * av_write_trailer() may try to use memory that was freed on
 * av_codec_close(). */
av_write_trailer(oc);
Close the codecs, close the output file, and free the remaining resources:
/* Close each codec. */
if (have_video)
    close_stream(oc, &video_st);
if (have_audio)
    close_stream(oc, &audio_st);
if (!(fmt->flags & AVFMT_NOFILE))
    /* Close the output file. */
    avio_closep(&oc->pb);
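close_stream() is the example's small cleanup helper: it releases everything open_video()/open_audio() allocated for the stream, after which main() frees the muxer itself with avformat_free_context(oc). A minimal sketch, assuming the fields shown earlier; newer versions of the example also free a per-stream packet:

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}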
/* Send the frame to the encoder. */
ret = avcodec_send_frame(c, frame);
if (ret < 0) {
    fprintf(stderr, "Error sending a frame to the encoder: %s\n",
            av_err2str(ret));
    exit(1);
}
while (ret >= 0) {
    /* Read an encoded packet back from the encoder. */
    ret = avcodec_receive_packet(c, pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;
    else if (ret < 0) {
        fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
        exit(1);
    }
    /* rescale output packet timestamp values from codec to stream timebase */
    /* The packet's timestamps are expressed in the encoder's time base;
     * convert them to the stream's time base. */
    av_packet_rescale_ts(pkt, c->time_base, st->time_base);
    /* Point the packet at its stream; audio and video use different stream indices. */
    pkt->stream_index = st->index;
    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt);
    ret = av_interleaved_write_frame(fmt_ctx, pkt);
    /* pkt is now blank (av_interleaved_write_frame() takes ownership of
     * its contents and resets pkt), so that no unreferencing is necessary.
     * This would be different if one used av_write_frame(). */
    if (ret < 0) {
        fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
        exit(1);
    }
}
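To make the rescaling step concrete: av_packet_rescale_ts() changes only the unit, not the presentation instant. If, say, the encoder time base is 1/25 (one tick per frame at 25 fps) and the stream time base ends up as 1/90000, a packet with pts = 10 in codec ticks (0.4 s) is rewritten to pts = 10 * 90000 / 25 = 36000 stream ticks, which still denotes 0.4 s.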
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, c->time_base,
                  STREAM_DURATION, (AVRational){ 1, 1 }) > 0)
    return NULL;
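av_compare_ts() compares two timestamps that live in different time bases by bringing them onto a common scale: ost->next_pts, in the encoder time base, is checked against STREAM_DURATION expressed in seconds (time base 1/1). In the stock example STREAM_DURATION is 10.0, so with a 1/25 encoder time base the generator returns NULL, and video encoding stops, once next_pts passes 250.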
/* when we pass a frame to the encoder, it may keep a reference to it
 * internally; make sure we do not overwrite it here */
if (av_frame_make_writable(ost->frame) < 0)
    exit(1);
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
    /* as we only generate a YUV420P picture, we must convert it
     * to the codec pixel format if needed */
    if (!ost->sws_ctx) {
        ost->sws_ctx = sws_getContext(c->width, c->height,
                                      AV_PIX_FMT_YUV420P,
                                      c->width, c->height,
                                      c->pix_fmt,
                                      SCALE_FLAGS, NULL, NULL, NULL);
        if (!ost->sws_ctx) {
            fprintf(stderr, "Could not initialize the conversion context\n");
            exit(1);
        }
    }
    fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
    sws_scale(ost->sws_ctx, (const uint8_t * const *) ost->tmp_frame->data,
              ost->tmp_frame->linesize, 0, c->height,
              ost->frame->data, ost->frame->linesize);
} else {
    fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
}
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVFrame *frame;
    int ret;
    int dst_nb_samples;
    c = ost->enc;
    /* Fetch the next audio frame to encode. */
    frame = get_audio_frame(ost);
    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
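        /* The computation itself is not shown in this excerpt; a sketch of how
         * the stock example derives dst_nb_samples from the resampler delay
         * (exact code may differ between FFmpeg versions): */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);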
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here */
        /* Make ost->frame writable. */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);
        /* convert to destination format */
        /* Resample into ost->frame. */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
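The excerpt stops in the middle of write_audio_frame(). For completeness, a sketch of how the stock example finishes the function: it stamps the resampled frame with a pts derived from the running sample count and hands it to the shared write_frame() helper. ost->tmp_pkt is the per-stream packet allocated in newer versions of the example; if your version passes the packet differently, adjust accordingly:

        frame = ost->frame;
        frame->pts = av_rescale_q(ost->samples_count,
                                  (AVRational){ 1, c->sample_rate },
                                  c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
}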