FFmpeg Source Code Analysis: av_read_frame() (Lei Xiaohua, CSDN blog)
av_read_frame()
In FFmpeg, av_read_frame() reads one video frame or several audio frames of compressed data from the stream. For example, when decoding video, before each video frame can be decoded, av_read_frame() must first be called to obtain that frame's compressed data; only then can the data be decoded (for H.264, one frame of compressed data usually corresponds to one NAL unit). Internally, av_read_packet(***) reads a packet; note that this packet always contains an integral number of frames — it never contains half a frame. Taking a TS stream as an example, it reads one complete PES packet (which contains several video or audio ES packets); after the read, av_parser_parse2(***) parses out one video frame or several audio frames and returns them. The next time the loop is entered, if the data from the previous read has not been fully consumed, then st = s->cur_st is not NULL, so execution re-enters the av_parser_parse2(***) path rather than the av_read_packet(***) path below. This guarantees that if one read produced N video frames (taking video as an example), the next N calls to av_read_frame(***) will not read new data, but will keep returning data from that first read until it has all been parsed. The declaration of av_read_frame() is in libavformat\avformat.h, as shown below:
/**
 * Return the next frame of a stream.
 * This function returns what is stored in the file, and does not validate
 * that what is there are valid frames for the decoder. It will split what is
 * stored in the file into frames and return one for each call. It will not
 * omit invalid data between valid frames so as to give the decoder the maximum
 * information possible for decoding.
 *
 * On success, the returned packet is reference-counted (pkt->buf is set) and
 * valid indefinitely. The packet must be freed with av_packet_unref() when
 * it is no longer needed. For video, the packet contains exactly one frame.
 * For audio, it contains an integer number of frames if each frame has
 * a known fixed size (e.g. PCM or ADPCM data). If the audio frames have
 * a variable size (e.g. MPEG audio), then it contains one frame.
 *
 * pkt->pts, pkt->dts and pkt->duration are always set to correct
 * values in AVStream.time_base units (and guessed if the format cannot
 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
 * has B-frames, so it is better to rely on pkt->dts if you do not
 * decompress the payload.
 *
 * @return 0 if OK, < 0 on error or end of file. On error, pkt will be blank
 *         (as if it came from av_packet_alloc()).
 *
 * @note pkt will be initialized, so it may be uninitialized, but it must not
 *       contain data that needs to be freed.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt);
The usage of av_read_frame() is described in detail in the comment above. Briefly, its two parameters are: s, the input AVFormatContext, and pkt, the output AVPacket. A return value of 0 means the read succeeded.
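As an illustration of these two parameters (a minimal sketch that is not part of the original post; "input.flv" is a placeholder path), a typical demuxing loop looks like this:

#include <stdio.h>
#include <inttypes.h>
#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *fmt_ctx = NULL;       /* the "s" parameter   */
    AVPacket *pkt = av_packet_alloc();     /* the "pkt" parameter */
    int ret;

    if ((ret = avformat_open_input(&fmt_ctx, "input.flv", NULL, NULL)) < 0)
        return 1;
    if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0)
        goto end;

    /* each successful call yields one video frame or several audio frames
     * of *compressed* data */
    while (av_read_frame(fmt_ctx, pkt) >= 0) {
        printf("stream %d, size %d, pts %" PRId64 "\n",
               pkt->stream_index, pkt->size, pkt->pts);
        av_packet_unref(pkt);              /* pkt is reference-counted */
    }

end:
    av_packet_free(&pkt);
    avformat_close_input(&fmt_ctx);
    return ret < 0 ? 1 : 0;
}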
Function call structure
The original post illustrates the call relationship with a figure (not reproduced here). From the source code one can see that av_read_frame() calls read_frame_internal().
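In place of that figure, the call chain covered in this article can be summarised as:

av_read_frame()
  -> read_frame_internal()
       -> ff_read_packet()
            -> AVInputFormat->read_packet()   (e.g. flv_read_packet() for FLV)
       -> parse_packet()
            -> av_parser_parse2()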
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    FFFormatContext *const si = ffformatcontext(s);
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    if (!genpts) {
        ret = si->packet_buffer.head
              ? avpriv_packet_list_get(&si->packet_buffer, pkt)
              : read_frame_internal(s, pkt);
        if (ret < 0)
            return ret;
        goto return_packet;
    }

    for (;;) {
        PacketListEntry *pktl = si->packet_buffer.head;

        if (pktl) {
            AVPacket *next_pkt = &pktl->pkt;

            if (next_pkt->dts != AV_NOPTS_VALUE) {
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                // last dts seen for this stream. if any of packets following
                // current one had no dts, we will set this to AV_NOPTS_VALUE.
                int64_t last_dts = next_pkt->dts;

                av_assert2(wrap_bits <= 64);

                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
                        av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2ULL << (wrap_bits - 1)) < 0) {
                        if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2ULL << (wrap_bits - 1))) {
                            // not B-frame
                            next_pkt->pts = pktl->pkt.dts;
                        }
                        if (last_dts != AV_NOPTS_VALUE) {
                            // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
                            last_dts = pktl->pkt.dts;
                        }
                    }
                    pktl = pktl->next;
                }
                if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
                    // Fixing the last reference frame had none pts issue (For MXF etc).
                    // We only do this when
                    // 1. eof.
                    // 2. we are not able to resolve a pts value for current packet.
                    // 3. the packets for this stream at the end of the files had valid dts.
                    next_pkt->pts = last_dts + next_pkt->duration;
                }
                pktl = si->packet_buffer.head;
            }

            /* read packet from packet buffer, if there is data */
            st = s->streams[next_pkt->stream_index];
            if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
                ret = avpriv_packet_list_get(&si->packet_buffer, pkt);
                goto return_packet;
            }
        }

        ret = read_frame_internal(s, pkt);
        if (ret < 0) {
            if (pktl && ret != AVERROR(EAGAIN)) {
                eof = 1;
                continue;
            } else
                return ret;
        }

        ret = avpriv_packet_list_put(&si->packet_buffer,
                                     pkt, NULL, 0);
        if (ret < 0) {
            av_packet_unref(pkt);
            return ret;
        }
    }

return_packet:
    st = s->streams[pkt->stream_index];
    if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
        ff_reduce_index(s, st->index);
        av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
    }

    if (is_relative(pkt->dts))
        pkt->dts -= RELATIVE_TS_BASE;
    if (is_relative(pkt->pts))
        pkt->pts -= RELATIVE_TS_BASE;

    return ret;
}
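Note that the whole packet-buffering branch above runs only when the caller has set AVFMT_FLAG_GENPTS on the format context. A minimal sketch of opting in (not from the original post; "input.flv" is a placeholder path):

#include <libavformat/avformat.h>

/* Open an input and take the "genpts" branch of av_read_frame(): packets are
 * buffered and missing pts values are generated from the dts of later packets. */
static int open_with_genpts(AVFormatContext **fmt_ctx)
{
    int ret = avformat_open_input(fmt_ctx, "input.flv", NULL, NULL);
    if (ret < 0)
        return ret;
    (*fmt_ctx)->flags |= AVFMT_FLAG_GENPTS;
    return 0;
}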
read_frame_internal()
The code of read_frame_internal() is fairly long; here we only take a quick look at its first part, in which two steps are essential: (1) it calls ff_read_packet() to read data from the corresponding AVInputFormat; (2) if the media stream requires an AVCodecParser, it calls parse_packet() to parse the resulting AVPacket. Below we look at the source code of ff_read_packet() and parse_packet() separately.
static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    FFFormatContext *const si = ffformatcontext(s);
    int ret, got_packet = 0;
    AVDictionary *metadata = NULL;

    while (!got_packet && !si->parse_queue.head) {
        AVStream *st;
        FFStream *sti;

        /* read next packet */
        ret = ff_read_packet(s, pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            for (unsigned i = 0; i < s->nb_streams; i++) {
                AVStream *const st = s->streams[i];
                FFStream *const sti = ffstream(st);
                if (sti->parser && sti->need_parsing)
                    parse_packet(s, pkt, st->index, 1);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st  = s->streams[pkt->stream_index];
        sti = ffstream(st);

        st->event_flags |= AVSTREAM_EVENT_FLAG_NEW_PACKETS;

        /* update context if required */
        if (sti->need_context_update) {
            if (avcodec_is_open(sti->avctx)) {
                av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
                avcodec_close(sti->avctx);
                sti->info->found_decoder = 0;
            }

            /* close parser, because it depends on the codec */
            if (sti->parser && sti->avctx->codec_id != st->codecpar->codec_id) {
                av_parser_close(sti->parser);
                sti->parser = NULL;
            }

            ret = avcodec_parameters_to_context(sti->avctx, st->codecpar);
            if (ret < 0) {
                av_packet_unref(pkt);
                return ret;
            }

            sti->need_context_update = 0;
        }

        if (pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   pkt->stream_index,
                   av_ts2str(pkt->pts),
                   av_ts2str(pkt->dts),
                   pkt->size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
                   pkt->stream_index,
                   av_ts2str(pkt->pts),
                   av_ts2str(pkt->dts),
                   pkt->size, pkt->duration, pkt->flags);

        if (sti->need_parsing && !sti->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            sti->parser = av_parser_init(st->codecpar->codec_id);
            if (!sti->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codecpar->codec_id));
                /* no parser available: just output the raw packets */
                sti->need_parsing = AVSTREAM_PARSE_NONE;
            } else if (sti->need_parsing == AVSTREAM_PARSE_HEADERS)
                sti->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (sti->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                sti->parser->flags |= PARSER_FLAG_ONCE;
            else if (sti->need_parsing == AVSTREAM_PARSE_FULL_RAW)
                sti->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
        }

        if (!sti->need_parsing || !sti->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, pkt, pkt->stream_index, 0)) < 0)
                return ret;
            st->codecpar->sample_rate = sti->avctx->sample_rate;
            st->codecpar->bit_rate    = sti->avctx->bit_rate;
#if FF_API_OLD_CHANNEL_LAYOUT
FF_DISABLE_DEPRECATION_WARNINGS
            st->codecpar->channels       = sti->avctx->ch_layout.nb_channels;
            st->codecpar->channel_layout = sti->avctx->ch_layout.order == AV_CHANNEL_ORDER_NATIVE ?
                                           sti->avctx->ch_layout.u.mask : 0;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
            ret = av_channel_layout_copy(&st->codecpar->ch_layout, &sti->avctx->ch_layout);
            if (ret < 0)
                return ret;
            st->codecpar->codec_id = sti->avctx->codec_id;
        } else {
            /* free packet */
            av_packet_unref(pkt);
        }
        if (pkt->flags & AV_PKT_FLAG_KEY)
            sti->skip_to_keyframe = 0;
        if (sti->skip_to_keyframe) {
            av_packet_unref(pkt);
            got_packet = 0;
        }
    }

    if (!got_packet && si->parse_queue.head)
        ret = avpriv_packet_list_get(&si->parse_queue, pkt);

    if (ret >= 0) {
        AVStream *const st = s->streams[pkt->stream_index];
        FFStream *const sti = ffstream(st);
        int discard_padding = 0;
        if (sti->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
            int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
            int64_t sample = ts_to_samples(st, pts);
            int64_t duration = ts_to_samples(st, pkt->duration);
            int64_t end_sample = sample + duration;
            if (duration > 0 && end_sample >= sti->first_discard_sample &&
                sample < sti->last_discard_sample)
                discard_padding = FFMIN(end_sample - sti->first_discard_sample, duration);
        }
        if (sti->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
            sti->skip_samples = sti->start_skip_samples;
        sti->skip_samples = FFMAX(0, sti->skip_samples);
        if (sti->skip_samples || discard_padding) {
            uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
            if (p) {
                AV_WL32(p, sti->skip_samples);
                AV_WL32(p + 4, discard_padding);
                av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %u / discard %u\n",
                       (unsigned)sti->skip_samples, (unsigned)discard_padding);
            }
            sti->skip_samples = 0;
        }

        if (sti->inject_global_side_data) {
            for (int i = 0; i < st->nb_side_data; i++) {
                const AVPacketSideData *const src_sd = &st->side_data[i];
                uint8_t *dst_data;

                if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                    continue;

                dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
                if (!dst_data) {
                    av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
                    continue;
                }

                memcpy(dst_data, src_sd->data, src_sd->size);
            }
            sti->inject_global_side_data = 0;
        }
    }

    if (!si->metafree) {
        int metaret = av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
        if (metadata) {
            s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
            av_dict_copy(&s->metadata, metadata, 0);
            av_dict_free(&metadata);
            av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
        }
        si->metafree = metaret == AVERROR_OPTION_NOT_FOUND;
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index,
               av_ts2str(pkt->pts),
               av_ts2str(pkt->dts),
               pkt->size, pkt->duration, pkt->flags);

    /* A demuxer might have returned EOF because of an IO error, let's
     * propagate this back to the user. */
    if (ret == AVERROR_EOF && s->pb && s->pb->error < 0 && s->pb->error != AVERROR(EAGAIN))
        ret = s->pb->error;

    return ret;
}
ff_read_packet()
The code of ff_read_packet() is fairly long, as shown below. The most important part of ff_read_packet() is the call to the AVInputFormat's read_packet() method.
int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FFFormatContext *const si = ffformatcontext(s);
    int err;

#if FF_API_INIT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
    pkt->data = NULL;
    pkt->size = 0;
    av_init_packet(pkt);
FF_ENABLE_DEPRECATION_WARNINGS
#else
    av_packet_unref(pkt);
#endif

    for (;;) {
        PacketListEntry *pktl = si->raw_packet_buffer.head;
        AVStream *st;
        FFStream *sti;
        const AVPacket *pkt1;

        if (pktl) {
            AVStream *const st = s->streams[pktl->pkt.stream_index];
            if (si->raw_packet_buffer_size >= s->probesize)
                if ((err = probe_codec(s, st, NULL)) < 0)
                    return err;
            if (ffstream(st)->request_probe <= 0) {
                avpriv_packet_list_get(&si->raw_packet_buffer, pkt);
                si->raw_packet_buffer_size -= pkt->size;
                return 0;
            }
        }

        err = s->iformat->read_packet(s, pkt);
        if (err < 0) {
            av_packet_unref(pkt);

            /* Some demuxers return FFERROR_REDO when they consume
               data and discard it (ignored streams, junk, extradata).
               We must re-call the demuxer to get the real packet. */
            if (err == FFERROR_REDO)
                continue;
            if (!pktl || err == AVERROR(EAGAIN))
                return err;
            for (unsigned i = 0; i < s->nb_streams; i++) {
                AVStream *const st = s->streams[i];
                FFStream *const sti = ffstream(st);
                if (sti->probe_packets || sti->request_probe > 0)
                    if ((err = probe_codec(s, st, NULL)) < 0)
                        return err;
                av_assert0(sti->request_probe <= 0);
            }
            continue;
        }

        err = av_packet_make_refcounted(pkt);
        if (err < 0) {
            av_packet_unref(pkt);
            return err;
        }

        if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
            av_log(s, AV_LOG_WARNING,
                   "Packet corrupt (stream = %d, dts = %s)",
                   pkt->stream_index, av_ts2str(pkt->dts));
            if (s->flags & AVFMT_FLAG_DISCARD_CORRUPT) {
                av_log(s, AV_LOG_WARNING, ", dropping it.\n");
                av_packet_unref(pkt);
                continue;
            }
            av_log(s, AV_LOG_WARNING, ".\n");
        }

        av_assert0(pkt->stream_index < (unsigned)s->nb_streams &&
                   "Invalid stream index.\n");

        st  = s->streams[pkt->stream_index];
        sti = ffstream(st);

        if (update_wrap_reference(s, st, pkt->stream_index, pkt) && sti->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
            // correct first time stamps to negative values
            if (!is_relative(sti->first_dts))
                sti->first_dts = wrap_timestamp(st, sti->first_dts);
            if (!is_relative(st->start_time))
                st->start_time = wrap_timestamp(st, st->start_time);
            if (!is_relative(sti->cur_dts))
                sti->cur_dts = wrap_timestamp(st, sti->cur_dts);
        }

        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);

        force_codec_ids(s, st);

        /* TODO: audio: time filter; video: frame reordering (pts != dts) */
        if (s->use_wallclock_as_timestamps)
            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);

        if (!pktl && sti->request_probe <= 0)
            return 0;

        err = avpriv_packet_list_put(&si->raw_packet_buffer,
                                     pkt, NULL, 0);
        if (err < 0) {
            av_packet_unref(pkt);
            return err;
        }
        pkt1 = &si->raw_packet_buffer.tail->pkt;
        si->raw_packet_buffer_size += pkt1->size;

        if ((err = probe_codec(s, st, pkt1)) < 0)
            return err;
    }
}
AVInputFormat's read_packet() is a function pointer that points to the data-reading function of the current AVInputFormat. Here we take the AVInputFormat corresponding to the FLV container format as an example to see what an implementation of read_packet() looks like. The AVInputFormat for FLV is defined in libavformat\flvdec.c, as shown below.
const AVInputFormat ff_flv_demuxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
    .priv_class     = &flv_kux_class,
};
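As an aside that is not part of the original post, user code can look up this registered demuxer by its short name and inspect the same fields:

#include <stdio.h>
#include <libavformat/avformat.h>

int main(void)
{
    /* looks up the demuxer registered under the short name "flv",
     * i.e. the ff_flv_demuxer structure shown above */
    const AVInputFormat *fmt = av_find_input_format("flv");
    if (fmt)
        printf("name=%s long_name=%s extensions=%s\n",
               fmt->name, fmt->long_name, fmt->extensions);
    return 0;
}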
From the definition of ff_flv_demuxer we can see that read_packet() corresponds to the flv_read_packet() function. Before looking at flv_read_packet(), let us first review the structure of the FLV container format (illustrated by a figure in the original post, not reproduced here). The body of an FLV file consists of a sequence of Tags, separated by Previous Tag Size fields. Each Tag consists of a Tag Header and Tag Data. The Tag Data differs according to the Tag type: it can be audio Tag Data, video Tag Data, or script Tag Data. Audio Tag Data and video Tag Data are briefly described below.
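To make the Tag Header layout concrete, here is a small standalone sketch (not FFmpeg code; the struct and function names are invented for illustration) that reads the 11-byte Tag Header preceding every Tag Data:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: the 11-byte FLV Tag Header that precedes every Tag Data. */
typedef struct FlvTagHeader {
    uint8_t  type;       /* 8 = audio, 9 = video, 18 = script data    */
    uint32_t data_size;  /* size of the Tag Data that follows         */
    uint32_t timestamp;  /* milliseconds, 24 bits + 8 extended bits   */
    uint32_t stream_id;  /* always 0                                  */
} FlvTagHeader;

static int read_flv_tag_header(FILE *f, FlvTagHeader *h)
{
    uint8_t buf[11];
    if (fread(buf, 1, sizeof(buf), f) != sizeof(buf))
        return -1;
    h->type      = buf[0];
    h->data_size = (buf[1] << 16) | (buf[2] << 8) | buf[3];
    h->timestamp = (buf[4] << 16) | (buf[5] << 8) | buf[6] | ((uint32_t)buf[7] << 24);
    h->stream_id = (buf[8] << 16) | (buf[9] << 8) | buf[10];
    return 0;
}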
Audio Tag Data
The Audio Tag is defined in the official specification as follows. The first byte of an Audio Tag carries the parameters of the audio data; the audio stream data starts from the second byte. The upper 4 bits of the first byte give the audio format (SoundFormat):
0 = Linear PCM, platform endian
1 = ADPCM
2 = MP3
3 = Linear PCM, little endian
4 = Nellymoser 16-kHz mono
5 = Nellymoser 8-kHz mono
6 = Nellymoser
7 = G.711 A-law logarithmic PCM
8 = G.711 mu-law logarithmic PCM
9 = reserved
10 = AAC
14 = MP3 8-kHz
15 = Device-specific sound
The next 2 bits (SoundRate) give the sampling rate: 0 = 5.5 kHz, 1 = 11 kHz, 2 = 22 kHz, 3 = 44 kHz. The next bit (SoundSize) gives the sample precision: 0 = 8 bits, 1 = 16 bits. The last bit (SoundType) gives the channel layout: 0 = sndMono, 1 = sndStereo. When the audio codec is AAC, the data following the first byte is an AACAUDIODATA structure (its layout is shown in a figure in the original post, not reproduced here).
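As an illustration of this layout (a hypothetical helper, not FFmpeg code), the first byte of an Audio Tag can be decoded like this:

#include <stdint.h>
#include <stdio.h>

static void describe_flv_audio_flags(uint8_t flags)
{
    /* nominal 5.5/11/22/44 kHz rates */
    static const int rates[4] = { 5512, 11025, 22050, 44100 };

    int sound_format = (flags >> 4) & 0x0F; /* upper 4 bits: codec id           */
    int sound_rate   = (flags >> 2) & 0x03; /* next 2 bits: sample-rate index    */
    int sound_size   = (flags >> 1) & 0x01; /* 1 bit: 0 = 8-bit, 1 = 16-bit      */
    int sound_type   =  flags       & 0x01; /* 1 bit: 0 = mono, 1 = stereo       */

    printf("format=%d rate=%dHz bits=%d channels=%d\n",
           sound_format, rates[sound_rate],
           sound_size ? 16 : 8, sound_type ? 2 : 1);
}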
Video Tag Data
The Video Tag is defined in the official specification as follows. The Video Tag likewise uses its first byte to carry the parameters of the video data; the video stream data starts from the second byte. The upper 4 bits of the first byte give the frame type (FrameType):
1 = keyframe (for AVC, a seekable frame)
2 = inter frame (for AVC, a non-seekable frame)
3 = disposable inter frame (H.263 only)
4 = generated keyframe (reserved for server use only)
5 = video info/command frame
The lower 4 bits of the first byte give the video codec ID (CodecID):
1 = JPEG (currently unused)
2 = Sorenson H.263
3 = Screen video
4 = On2 VP6
5 = On2 VP6 with alpha channel
6 = Screen video version 2
7 = AVC
When the video codec is AVC (H.264), the data following the first byte is an AVCVIDEOPACKET structure (its layout is shown in a figure in the original post, not reproduced here).
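Similarly, the first byte of a Video Tag can be decoded with a hypothetical helper like this (not FFmpeg code):

#include <stdint.h>
#include <stdio.h>

static void describe_flv_video_flags(uint8_t flags)
{
    int frame_type = (flags >> 4) & 0x0F; /* 1 = keyframe, 2 = inter frame, ... */
    int codec_id   =  flags       & 0x0F; /* 2 = Sorenson H.263, 7 = AVC, ...   */

    printf("frame_type=%d codec_id=%d keyframe=%s\n",
           frame_type, codec_id, frame_type == 1 ? "yes" : "no");
}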
flv_read_packet()
flv_read_packet() is defined in libavformat\flvdec.c, as shown below.
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FLVContext *flv = s->priv_data;
    int ret, i, size, flags;
    enum FlvTagType type;
    int stream_type = -1;
    int64_t next, pos, meta_pos;
    int64_t dts, pts = AV_NOPTS_VALUE;
    int av_uninit(channels);
    int av_uninit(sample_rate);
    AVStream *st    = NULL;
    int last = -1;
    int orig_size;

retry:
    /* pkt size is repeated at end. skip it */
    pos  = avio_tell(s->pb);
    type = (avio_r8(s->pb) & 0x1F);
    orig_size =
    size = avio_rb24(s->pb);
    flv->sum_flv_tag_size += size + 11;
    dts  = avio_rb24(s->pb);
    dts |= (unsigned)avio_r8(s->pb) << 24;
    av_log(s, AV_LOG_TRACE, "type:%d, size:%d, last:%d, dts:%"PRId64" pos:%"PRId64"\n", type, size, last, dts, avio_tell(s->pb));
    if (avio_feof(s->pb))
        return AVERROR_EOF;
    avio_skip(s->pb, 3); /* stream id, always 0 */
    flags = 0;

    if (flv->validate_next < flv->validate_count) {
        int64_t validate_pos = flv->validate_index[flv->validate_next].pos;
        if (pos == validate_pos) {
            if (FFABS(dts - flv->validate_index[flv->validate_next].dts) <=
                VALIDATE_INDEX_TS_THRESH) {
                flv->validate_next++;
            } else {
                clear_index_entries(s, validate_pos);
                flv->validate_count = 0;
            }
        } else if (pos > validate_pos) {
            clear_index_entries(s, validate_pos);
            flv->validate_count = 0;
        }
    }

    if (size == 0) {
        ret = FFERROR_REDO;
        goto leave;
    }

    next = size + avio_tell(s->pb);

    if (type == FLV_TAG_TYPE_AUDIO) {
        stream_type = FLV_STREAM_TYPE_AUDIO;
        flags       = avio_r8(s->pb);
        size--;
    } else if (type == FLV_TAG_TYPE_VIDEO) {
        stream_type = FLV_STREAM_TYPE_VIDEO;
        flags       = avio_r8(s->pb);
        size--;
        if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD)
            goto skip;
    } else if (type == FLV_TAG_TYPE_META) {
        stream_type = FLV_STREAM_TYPE_SUBTITLE;
        if (size > 13 + 1 + 4) { // Header-type metadata stuff
            int type;
            meta_pos = avio_tell(s->pb);
            type = flv_read_metabody(s, next);
            if (type == 0 && dts == 0 || type < 0) {
                if (type < 0 && flv->validate_count &&
                    flv->validate_index[0].pos     > next &&
                    flv->validate_index[0].pos - 4 < next) {
                    av_log(s, AV_LOG_WARNING, "Adjusting next position due to index mismatch\n");
                    next = flv->validate_index[0].pos - 4;
                }
                goto skip;
            } else if (type == TYPE_ONTEXTDATA) {
                avpriv_request_sample(s, "OnTextData packet");
                return flv_data_packet(s, pkt, dts, next);
            } else if (type == TYPE_ONCAPTION) {
                return flv_data_packet(s, pkt, dts, next);
            } else if (type == TYPE_UNKNOWN) {
                stream_type = FLV_STREAM_TYPE_DATA;
            }
            avio_seek(s->pb, meta_pos, SEEK_SET);
        }
    } else {
        av_log(s, AV_LOG_DEBUG,
               "Skipping flv packet: type %d, size %d, flags %d.\n",
               type, size, flags);
skip:
        if (avio_seek(s->pb, next, SEEK_SET) != next) {
            // This can happen if flv_read_metabody above read past
            // next, on a non-seekable input, and the preceding data has
            // been flushed out from the IO buffer.
            av_log(s, AV_LOG_ERROR, "Unable to seek to the next packet\n");
            return AVERROR_INVALIDDATA;
        }
        ret = FFERROR_REDO;
        goto leave;
    }

    /* skip empty data packets */
    if (!size) {
        ret = FFERROR_REDO;
        goto leave;
    }

    /* now find stream */
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (stream_type == FLV_STREAM_TYPE_AUDIO) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
                (s->audio_codec_id || flv_same_audio_codec(st->codecpar, flags)))
                break;
        } else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
                (s->video_codec_id || flv_same_video_codec(st->codecpar, flags)))
                break;
        } else if (stream_type == FLV_STREAM_TYPE_SUBTITLE) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
                break;
        } else if (stream_type == FLV_STREAM_TYPE_DATA) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA)
                break;
        }
    }
    if (i == s->nb_streams) {
        static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE, AVMEDIA_TYPE_DATA};
        st = create_stream(s, stream_types[stream_type]);
        if (!st)
            return AVERROR(ENOMEM);
    }
    av_log(s, AV_LOG_TRACE, "%d %X %d \n", stream_type, flags, st->discard);

    if (flv->time_pos <= pos) {
        dts += flv->time_offset;
    }

    if ((s->pb->seekable & AVIO_SEEKABLE_NORMAL) &&
        ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY ||
         stream_type == FLV_STREAM_TYPE_AUDIO))
        av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);

    if ((st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || stream_type == FLV_STREAM_TYPE_AUDIO)) ||
        (st->discard >= AVDISCARD_BIDIR  &&  ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && stream_type == FLV_STREAM_TYPE_VIDEO)) ||
         st->discard >= AVDISCARD_ALL) {
        avio_seek(s->pb, next, SEEK_SET);
        ret = FFERROR_REDO;
        goto leave;
    }

    // if not streamed and no duration from metadata then seek to end to find
    // the duration from the timestamps
    if ((s->pb->seekable & AVIO_SEEKABLE_NORMAL) &&
        (!s->duration || s->duration == AV_NOPTS_VALUE) &&
        !flv->searched_for_end) {
        int size;
        const int64_t pos = avio_tell(s->pb);
        // Read the last 4 bytes of the file, this should be the size of the
        // previous FLV tag. Use the timestamp of its payload as duration.
        int64_t fsize     = avio_size(s->pb);
retry_duration:
        avio_seek(s->pb, fsize - 4, SEEK_SET);
        size = avio_rb32(s->pb);
        if (size > 0 && size < fsize) {
            // Seek to the start of the last FLV tag at position (fsize - 4 - size)
            // but skip the byte indicating the type.
            avio_seek(s->pb, fsize - 3 - size, SEEK_SET);
            if (size == avio_rb24(s->pb) + 11) {
                uint32_t ts = avio_rb24(s->pb);
                ts         |= (unsigned)avio_r8(s->pb) << 24;
                if (ts)
                    s->duration = ts * (int64_t)AV_TIME_BASE / 1000;
                else if (fsize >= 8 && fsize - 8 >= size) {
                    fsize -= size + 4;
                    goto retry_duration;
                }
            }
        }

        avio_seek(s->pb, pos, SEEK_SET);
        flv->searched_for_end = 1;
    }

    if (stream_type == FLV_STREAM_TYPE_AUDIO) {
        int bits_per_coded_sample;
        channels    = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1;
        sample_rate = 44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >>
                                FLV_AUDIO_SAMPLERATE_OFFSET) >> 3;
        bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
        if (!av_channel_layout_check(&st->codecpar->ch_layout) ||
            !st->codecpar->sample_rate ||
            !st->codecpar->bits_per_coded_sample) {
            av_channel_layout_default(&st->codecpar->ch_layout, channels);
            st->codecpar->sample_rate           = sample_rate;
            st->codecpar->bits_per_coded_sample = bits_per_coded_sample;
        }
        if (!st->codecpar->codec_id) {
            flv_set_audio_codec(s, st, st->codecpar,
                                flags & FLV_AUDIO_CODECID_MASK);
            flv->last_sample_rate =
            sample_rate           = st->codecpar->sample_rate;
            flv->last_channels    =
            channels              = st->codecpar->ch_layout.nb_channels;
        } else {
            AVCodecParameters *par = avcodec_parameters_alloc();
            if (!par) {
                ret = AVERROR(ENOMEM);
                goto leave;
            }
            par->sample_rate = sample_rate;
            par->bits_per_coded_sample = bits_per_coded_sample;
            flv_set_audio_codec(s, st, par, flags & FLV_AUDIO_CODECID_MASK);
            sample_rate = par->sample_rate;
            avcodec_parameters_free(&par);
        }
    } else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
        int ret = flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK, 1);
        if (ret < 0)
            return ret;
        size -= ret;
    } else if (stream_type == FLV_STREAM_TYPE_SUBTITLE) {
        st->codecpar->codec_id = AV_CODEC_ID_TEXT;
    } else if (stream_type == FLV_STREAM_TYPE_DATA) {
        st->codecpar->codec_id = AV_CODEC_ID_NONE; // Opaque AMF data
    }

    if (st->codecpar->codec_id == AV_CODEC_ID_AAC ||
        st->codecpar->codec_id == AV_CODEC_ID_H264 ||
        st->codecpar->codec_id == AV_CODEC_ID_MPEG4) {
        int type = avio_r8(s->pb);
        size--;

        if (size < 0) {
            ret = AVERROR_INVALIDDATA;
            goto leave;
        }

        if (st->codecpar->codec_id == AV_CODEC_ID_H264 || st->codecpar->codec_id == AV_CODEC_ID_MPEG4) {
            // sign extension
            int32_t cts = (avio_rb24(s->pb) + 0xff800000) ^ 0xff800000;
            pts = av_sat_add64(dts, cts);
            if (cts < 0) { // dts might be wrong
                if (!flv->wrong_dts)
                    av_log(s, AV_LOG_WARNING,
                        "Negative cts, previous timestamps might be wrong.\n");
                flv->wrong_dts = 1;
            } else if (FFABS(dts - pts) > 1000*60*15) {
                av_log(s, AV_LOG_WARNING,
                       "invalid timestamps %"PRId64" %"PRId64"\n", dts, pts);
                dts = pts = AV_NOPTS_VALUE;
            }
        }
        if (type == 0 && (!st->codecpar->extradata || st->codecpar->codec_id == AV_CODEC_ID_AAC ||
            st->codecpar->codec_id == AV_CODEC_ID_H264)) {
            AVDictionaryEntry *t;

            if (st->codecpar->extradata) {
                if ((ret = flv_queue_extradata(flv, s->pb, stream_type, size)) < 0)
                    return ret;
                ret = FFERROR_REDO;
                goto leave;
            }
            if ((ret = flv_get_extradata(s, st, size)) < 0)
                return ret;

            /* Workaround for buggy Omnia A/XE encoder */
            t = av_dict_get(s->metadata, "Encoder", NULL, 0);
            if (st->codecpar->codec_id == AV_CODEC_ID_AAC &&
                t && !strcmp(t->value, "Omnia A/XE"))
                st->codecpar->extradata_size = 2;

            ret = FFERROR_REDO;
            goto leave;
        }
    }

    /* skip empty data packets */
    if (!size) {
        ret = FFERROR_REDO;
        goto leave;
    }

    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;
    pkt->dts          = dts;
    pkt->pts          = pts == AV_NOPTS_VALUE ? dts : pts;
    pkt->stream_index = st->index;
    pkt->pos          = pos;
    if (flv->new_extradata[stream_type]) {
        int ret = av_packet_add_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
                                          flv->new_extradata[stream_type],
                                          flv->new_extradata_size[stream_type]);
        if (ret >= 0) {
            flv->new_extradata[stream_type]      = NULL;
            flv->new_extradata_size[stream_type] = 0;
        }
    }
    if (stream_type == FLV_STREAM_TYPE_AUDIO &&
                    (sample_rate != flv->last_sample_rate ||
                     channels    != flv->last_channels)) {
        flv->last_sample_rate = sample_rate;
        flv->last_channels    = channels;
        ff_add_param_change(pkt, channels, 0, sample_rate, 0, 0);
    }

    if (stream_type == FLV_STREAM_TYPE_AUDIO ||
        (flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY ||
        stream_type == FLV_STREAM_TYPE_SUBTITLE ||
        stream_type == FLV_STREAM_TYPE_DATA)
        pkt->flags |= AV_PKT_FLAG_KEY;

leave:
    last = avio_rb32(s->pb);
    if (!flv->trust_datasize) {
        if (last != orig_size + 11 && last != orig_size + 10 &&
            !avio_feof(s->pb) &&
            (last != orig_size || !last) && last != flv->sum_flv_tag_size &&
            !flv->broken_sizes) {
            av_log(s, AV_LOG_ERROR, "Packet mismatch %d %d %d\n", last, orig_size + 11, flv->sum_flv_tag_size);
            avio_seek(s->pb, pos + 1, SEEK_SET);
            ret = resync(s);
            av_packet_unref(pkt);
            if (ret >= 0) {
                goto retry;
            }
        }
    }

    if (ret >= 0)
        flv->last_ts = pkt->dts;

    return ret;
}
The code of flv_read_packet() is fairly long, but the logic is fairly simple. Its main job is to parse the Tags and Tag Data layer by layer according to the FLV file-format specification and extract the information they carry. The key places are already commented, so they are not described in further detail here.
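One detail worth illustrating is how flv_read_packet() estimates the duration when the metadata does not provide one: it seeks to the end of the file, reads the trailing Previous Tag Size, jumps back to the last Tag, and uses its timestamp. A standalone sketch of the same idea follows (not FFmpeg code; the file path and helpers are illustrative, with minimal error handling):

#include <stdint.h>
#include <stdio.h>

static uint32_t rb24(FILE *f) { /* read 3 bytes, big-endian */
    uint32_t v = 0;
    for (int i = 0; i < 3; i++) v = (v << 8) | (uint8_t)fgetc(f);
    return v;
}

static uint32_t rb32(FILE *f) { /* read 4 bytes, big-endian */
    uint32_t v = 0;
    for (int i = 0; i < 4; i++) v = (v << 8) | (uint8_t)fgetc(f);
    return v;
}

/* Returns the duration in milliseconds, or -1 if it cannot be determined. */
static long flv_duration_ms(const char *path)
{
    FILE *f = fopen(path, "rb");
    long fsize, dur = -1;
    if (!f) return -1;
    fseek(f, 0, SEEK_END);
    fsize = ftell(f);
    fseek(f, fsize - 4, SEEK_SET);            /* trailing PreviousTagSize       */
    long size = (long)rb32(f);
    if (size > 0 && size < fsize) {
        fseek(f, fsize - 3 - size, SEEK_SET); /* last tag, skipping the type byte */
        if (size == (long)rb24(f) + 11) {     /* DataSize + 11-byte tag header    */
            uint32_t ts = rb24(f);            /* lower 24 bits of the timestamp   */
            ts |= (uint32_t)fgetc(f) << 24;   /* extended (upper 8) bits          */
            dur = (long)ts;                   /* FLV timestamps are in ms         */
        }
    }
    fclose(f);
    return dur;
}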
parse_packet()
parse_packet() provides AVPacket parsing for media streams that require an AVCodecParser. Its code is shown below. As can be seen from the code, it ultimately calls av_parser_parse2() of the corresponding AVCodecParser to split out AVPackets; after that, a series of fields are filled in from the parsed information, which is not described in further detail here. (A standalone usage sketch of av_parser_parse2() follows the function body below.)
/**
 * Parse a packet, add all split parts to parse_queue.
 *
 * @param pkt   Packet to parse; must not be NULL.
 * @param flush Indicates whether to flush. If set, pkt must be blank.
 */
static int parse_packet(AVFormatContext *s, AVPacket *pkt,
                        int stream_index, int flush)
{
    FFFormatContext *const si = ffformatcontext(s);
    AVPacket *out_pkt = si->parse_pkt;
    AVStream *st = s->streams[stream_index];
    FFStream *const sti = ffstream(st);
    const uint8_t *data = pkt->data;
    int size = pkt->size;
    int ret = 0, got_output = flush;

    if (!size && !flush && sti->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
        // preserve 0-size sync packets
        compute_pkt_fields(s, st, sti->parser, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
    }

    while (size > 0 || (flush && got_output)) {
        int64_t next_pts = pkt->pts;
        int64_t next_dts = pkt->dts;
        int len;

        len = av_parser_parse2(sti->parser, sti->avctx,
                               &out_pkt->data, &out_pkt->size, data, size,
                               pkt->pts, pkt->dts, pkt->pos);

        pkt->pts = pkt->dts = AV_NOPTS_VALUE;
        pkt->pos = -1;
        /* increment read pointer */
        av_assert1(data || !len);
        data  = len ? data + len : data;
        size -= len;

        got_output = !!out_pkt->size;

        if (!out_pkt->size)
            continue;

        if (pkt->buf && out_pkt->data == pkt->data) {
            /* reference pkt->buf only when out_pkt->data is guaranteed to point
             * to data in it and not in the parser's internal buffer. */
            /* XXX: Ensure this is the case with all parsers when sti->parser->flags
             * is PARSER_FLAG_COMPLETE_FRAMES and check for that instead? */
            out_pkt->buf = av_buffer_ref(pkt->buf);
            if (!out_pkt->buf) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        } else {
            ret = av_packet_make_refcounted(out_pkt);
            if (ret < 0)
                goto fail;
        }

        if (pkt->side_data) {
            out_pkt->side_data       = pkt->side_data;
            out_pkt->side_data_elems = pkt->side_data_elems;
            pkt->side_data       = NULL;
            pkt->side_data_elems = 0;
        }

        /* set the duration */
        out_pkt->duration = (sti->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->duration : 0;
        if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (sti->avctx->sample_rate > 0) {
                out_pkt->duration =
                    av_rescale_q_rnd(sti->parser->duration,
                                     (AVRational) { 1, sti->avctx->sample_rate },
                                     st->time_base,
                                     AV_ROUND_DOWN);
            }
        }

        out_pkt->stream_index = st->index;
        out_pkt->pts          = sti->parser->pts;
        out_pkt->dts          = sti->parser->dts;
        out_pkt->pos          = sti->parser->pos;
        out_pkt->flags       |= pkt->flags & (AV_PKT_FLAG_DISCARD | AV_PKT_FLAG_CORRUPT);

        if (sti->need_parsing == AVSTREAM_PARSE_FULL_RAW)
            out_pkt->pos = sti->parser->frame_offset;

        if (sti->parser->key_frame == 1 ||
            (sti->parser->key_frame == -1 &&
             sti->parser->pict_type == AV_PICTURE_TYPE_I))
            out_pkt->flags |= AV_PKT_FLAG_KEY;

        if (sti->parser->key_frame == -1 && sti->parser->pict_type == AV_PICTURE_TYPE_NONE && (pkt->flags & AV_PKT_FLAG_KEY))
            out_pkt->flags |= AV_PKT_FLAG_KEY;

        compute_pkt_fields(s, st, sti->parser, out_pkt, next_dts, next_pts);

        ret = avpriv_packet_list_put(&si->parse_queue,
                                     out_pkt, NULL, 0);
        if (ret < 0)
            goto fail;
    }

    /* end of the stream => close and free the parser */
    if (flush) {
        av_parser_close(sti->parser);
        sti->parser = NULL;
    }

fail:
    if (ret < 0)
        av_packet_unref(out_pkt);
    av_packet_unref(pkt);
    return ret;
}
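For comparison with parse_packet() above, here is a standalone sketch of driving av_parser_parse2() directly on a raw H.264 elementary stream (not from the original post; the input path is a placeholder and error handling is kept minimal):

#include <stdio.h>
#include <libavcodec/avcodec.h>

int main(void)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_H264);
    FILE *f = fopen("input.h264", "rb");   /* placeholder: a raw Annex-B stream */
    uint8_t inbuf[4096];
    size_t n;

    if (!codec || !avctx || !parser || !f)
        return 1;
    if (avcodec_open2(avctx, codec, NULL) < 0)
        return 1;

    while ((n = fread(inbuf, 1, sizeof(inbuf), f)) > 0) {
        const uint8_t *data = inbuf;
        int size = (int)n;
        while (size > 0) {
            uint8_t *out = NULL;
            int out_size = 0;
            /* splits the byte stream into complete frames, which is exactly
             * what parse_packet() does for every stream that needs parsing */
            int len = av_parser_parse2(parser, avctx, &out, &out_size,
                                       data, size,
                                       AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (len < 0)
                goto end;
            data += len;
            size -= len;
            if (out_size > 0)
                printf("got one frame of %d bytes\n", out_size);
        }
    }
end:
    fclose(f);
    av_parser_close(parser);
    avcodec_free_context(&avctx);
    return 0;
}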