avformat: move AVStream.{first,cur}_dts to AVStreamInternal

They are private fields; there is no reason to have them exposed in a public header.

Signed-off-by: James Almer <jamrial@gmail.com>
James Almer 2021-06-05 11:12:03 -03:00
parent 39affa5f8e
commit 591b88e678
15 changed files with 83 additions and 83 deletions


@@ -992,17 +992,6 @@ typedef struct AVStream {
int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
// Timestamp generation support:
/**
* Timestamp corresponding to the last dts sync point.
*
* Initialized when AVCodecParserContext.dts_sync_point >= 0 and
* a DTS is received from the underlying container. Otherwise set to
* AV_NOPTS_VALUE by default.
*/
int64_t first_dts;
int64_t cur_dts;
/**
* An opaque field for libavformat internal usage.
* Must not be accessed in any way by callers.


@@ -638,8 +638,8 @@ static int concat_read_packet(AVFormatContext *avf, AVPacket *pkt)
}
}
if (cat->cur_file->duration == AV_NOPTS_VALUE && st->cur_dts != AV_NOPTS_VALUE) {
int64_t next_dts = av_rescale_q(st->cur_dts, st->time_base, AV_TIME_BASE_Q);
if (cat->cur_file->duration == AV_NOPTS_VALUE && st->internal->cur_dts != AV_NOPTS_VALUE) {
int64_t next_dts = av_rescale_q(st->internal->cur_dts, st->time_base, AV_TIME_BASE_Q);
if (cat->cur_file->next_dts == AV_NOPTS_VALUE || next_dts > cat->cur_file->next_dts) {
cat->cur_file->next_dts = next_dts;
}


@@ -139,7 +139,7 @@ static int fifo_thread_write_header(FifoThreadContext *ctx)
}
for (i = 0;i < avf2->nb_streams; i++)
avf2->streams[i]->cur_dts = 0;
avf2->streams[i]->internal->cur_dts = 0;
ret = avformat_write_header(avf2, &format_options);
if (!ret)


@@ -504,11 +504,11 @@ static int hds_write_packet(AVFormatContext *s, AVPacket *pkt)
int64_t end_dts = os->fragment_index * (int64_t)c->min_frag_duration;
int ret;
if (st->first_dts == AV_NOPTS_VALUE)
st->first_dts = pkt->dts;
if (st->internal->first_dts == AV_NOPTS_VALUE)
st->internal->first_dts = pkt->dts;
if ((!os->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
av_compare_ts(pkt->dts - st->first_dts, st->time_base,
av_compare_ts(pkt->dts - st->internal->first_dts, st->time_base,
end_dts, AV_TIME_BASE_Q) >= 0 &&
pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) {
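
This hunk, like the near-identical one in the smoothstreaming muxer further down, records the first dts seen on the stream and then uses av_compare_ts() to decide when the configured minimum fragment duration has elapsed. A minimal standalone sketch of that decision with made-up time base and duration values (compile against libavutil, e.g. cc frag.c -lavutil):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>

    int main(void)
    {
        AVRational tb = { 1, 90000 };                   /* example stream time base */
        int64_t first_dts         = 180000;             /* first dts seen on the stream */
        int64_t min_frag_duration = 2 * AV_TIME_BASE;   /* 2 s, expressed in AV_TIME_BASE_Q */
        int64_t dts = first_dts + 3 * 90000;            /* a keyframe arriving 3 s later */

        /* Same comparison as hds_write_packet()/ism_write_packet():
         * elapsed stream time vs. the fragment duration in microseconds. */
        if (av_compare_ts(dts - first_dts, tb, min_frag_duration, AV_TIME_BASE_Q) >= 0)
            printf("cut a new fragment at dts=%" PRId64 "\n", dts);
        else
            printf("keep filling the current fragment\n");
        return 0;
    }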


@@ -389,6 +389,17 @@ struct AVStreamInternal {
* 0 means unknown
*/
int stream_identifier;
// Timestamp generation support:
/**
* Timestamp corresponding to the last dts sync point.
*
* Initialized when AVCodecParserContext.dts_sync_point >= 0 and
* a DTS is received from the underlying container. Otherwise set to
* AV_NOPTS_VALUE by default.
*/
int64_t first_dts;
int64_t cur_dts;
};
void avpriv_stream_set_need_parsing(AVStream *st, enum AVStreamParseType type);
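
Together with the avformat.h hunk at the top, this is the whole mechanical change: both fields keep their documentation and semantics and simply move behind the opaque AVStreamInternal pointer. A trimmed standalone sketch of the resulting access pattern (the structs below are illustrative stand-ins, not the real definitions):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define AV_NOPTS_VALUE INT64_MIN   /* same value as FFmpeg's constant */

    /* Trimmed stand-ins: only the fields touched by this commit are modelled. */
    typedef struct AVStreamInternal {
        int64_t first_dts;   /* timestamp of the last dts sync point */
        int64_t cur_dts;
    } AVStreamInternal;

    typedef struct AVStream {
        int index;
        AVStreamInternal *internal;  /* opaque to library users */
        /* first_dts/cur_dts no longer appear here */
    } AVStream;

    int main(void)
    {
        AVStreamInternal priv = { .first_dts = AV_NOPTS_VALUE, .cur_dts = AV_NOPTS_VALUE };
        AVStream st = { .index = 0, .internal = &priv };

        /* old spelling (no longer compiles against the new header): st.cur_dts = 0;
         * new, libavformat-internal spelling: */
        st.internal->cur_dts = 0;
        printf("stream %d cur_dts=%" PRId64 "\n", st.index, st.internal->cur_dts);
        return 0;
    }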


@@ -525,7 +525,7 @@ static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *
if (s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "compute_muxer_pkt_fields: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);
av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->internal->cur_dts), delay, pkt->size, pkt->stream_index);
if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
pkt->pts = pkt->dts;
@@ -553,14 +553,14 @@ static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *
pkt->dts = st->internal->pts_buffer[0];
}
if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
if (st->internal->cur_dts && st->internal->cur_dts != AV_NOPTS_VALUE &&
((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE &&
st->codecpar->codec_type != AVMEDIA_TYPE_DATA &&
st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
st->internal->cur_dts >= pkt->dts) || st->internal->cur_dts > pkt->dts)) {
av_log(s, AV_LOG_ERROR,
"Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
st->index, av_ts2str(st->internal->cur_dts), av_ts2str(pkt->dts));
return AVERROR(EINVAL);
}
if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
@@ -575,7 +575,7 @@ static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *
av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n",
av_ts2str(pkt->pts), av_ts2str(pkt->dts));
st->cur_dts = pkt->dts;
st->internal->cur_dts = pkt->dts;
st->internal->priv_pts->val = pkt->dts;
/* update pts */
@@ -763,13 +763,13 @@ static int prepare_input_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
/* check that the dts are increasing (or at least non-decreasing,
* if the format allows it */
if (st->cur_dts != AV_NOPTS_VALUE &&
((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) ||
st->cur_dts > pkt->dts)) {
if (st->internal->cur_dts != AV_NOPTS_VALUE &&
((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->internal->cur_dts >= pkt->dts) ||
st->internal->cur_dts > pkt->dts)) {
av_log(s, AV_LOG_ERROR,
"Application provided invalid, non monotonically increasing "
"dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
st->index, st->cur_dts, pkt->dts);
st->index, st->internal->cur_dts, pkt->dts);
return AVERROR(EINVAL);
}
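
Besides switching to st->internal->cur_dts, these mux.c hunks show the invariant the muxer enforces: unless the output format sets AVFMT_TS_NONSTRICT, each packet's dts must be strictly greater than the stream's cur_dts (merely non-decreasing otherwise). A reduced standalone sketch of that check; it omits the subtitle/data special cases and the cur_dts == 0 exemption visible in the first hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define NOPTS INT64_MIN  /* stand-in for AV_NOPTS_VALUE */

    /* Returns 0 when dts is acceptable, -1 when it would break monotonicity,
     * mirroring compute_muxer_pkt_fields()/prepare_input_packet(). */
    static int check_dts_monotonic(int64_t cur_dts, int64_t dts, int ts_nonstrict)
    {
        if (cur_dts == NOPTS || dts == NOPTS)
            return 0;                          /* nothing to compare against yet */
        if (!ts_nonstrict && cur_dts >= dts)   /* strict: must strictly increase */
            return -1;
        if (cur_dts > dts)                     /* relaxed: must not decrease */
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_dts_monotonic(100, 100, 0));  /* -1: equal dts rejected */
        printf("%d\n", check_dts_monotonic(100, 100, 1));  /*  0: allowed when nonstrict */
        printf("%d\n", check_dts_monotonic(100,  99, 1));  /* -1: going backwards never allowed */
        return 0;
    }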


@@ -1766,7 +1766,7 @@ static int mxf_compute_ptses_fake_index(MXFContext *mxf, MXFIndexTable *index_ta
* 6: 5 5
*
* We do this by bucket sorting x by x+TemporalOffset[x] into mxf->ptses,
* then settings mxf->first_dts = -max(TemporalOffset[x]).
* then settings mxf->internal->first_dts = -max(TemporalOffset[x]).
* The latter makes DTS <= PTS.
*/
for (i = x = 0; i < index_table->nb_segments; i++) {


@@ -79,7 +79,7 @@ int ff_pcm_read_seek(AVFormatContext *s,
pos *= block_align;
/* recompute exact position */
st->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
st->internal->cur_dts = av_rescale(pos, st->time_base.den, byte_rate * (int64_t)st->time_base.num);
if ((ret = avio_seek(s->pb, pos + s->internal->data_offset, SEEK_SET)) < 0)
return ret;
return 0;
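
The pcm.c change (and the vqf.c one further down) follows the common raw-audio pattern: after seeking to a byte position, cur_dts is rebuilt by rescaling that position into the stream time base. A worked standalone sketch of the arithmetic with made-up parameters; the real code uses av_rescale(), which additionally handles rounding and overflow:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Example: 16-bit stereo PCM at 48 kHz, stream time base 1/48000. */
        int64_t block_align = 4;                    /* bytes per sample frame */
        int64_t byte_rate   = 48000 * block_align;  /* bytes per second       */
        int64_t tb_num = 1, tb_den = 48000;

        int64_t pos = 96000 * block_align;          /* seek target: 2 s into the stream */

        /* st->internal->cur_dts = av_rescale(pos, time_base.den,
         *                                    byte_rate * time_base.num);     */
        int64_t cur_dts = pos * tb_den / (byte_rate * tb_num);
        printf("cur_dts = %" PRId64 " (ticks of 1/%" PRId64 " s)\n", cur_dts, tb_den);
        return 0;
    }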


@@ -415,7 +415,7 @@ rdt_parse_sdp_line (AVFormatContext *s, int st_index,
if (av_strstart(p, "OpaqueData:buffer;", &p)) {
rdt->mlti_data = rdt_parse_b64buf(&rdt->mlti_data_size, p);
} else if (av_strstart(p, "StartTime:integer;", &p))
stream->first_dts = atoi(p);
stream->internal->first_dts = atoi(p);
else if (av_strstart(p, "ASMRuleBook:string;", &p)) {
int n, first = -1;
@@ -465,7 +465,7 @@ add_dstream(AVFormatContext *s, AVStream *orig_st)
return NULL;
st->id = orig_st->id;
st->codecpar->codec_type = orig_st->codecpar->codec_type;
st->first_dts = orig_st->first_dts;
st->internal->first_dts = orig_st->internal->first_dts;
return st;
}


@@ -1444,7 +1444,7 @@ static av_cold int sbg_read_header(AVFormatContext *avf)
st->duration = script.end_ts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
av_rescale(script.end_ts - script.start_ts,
sbg->sample_rate, AV_TIME_BASE);
st->cur_dts = st->start_time;
st->internal->cur_dts = st->start_time;
r = encode_intervals(&script, st->codecpar, &inter);
if (r < 0)
goto fail;
@@ -1465,7 +1465,7 @@ static int sbg_read_packet(AVFormatContext *avf, AVPacket *packet)
int64_t ts, end_ts;
int ret;
ts = avf->streams[0]->cur_dts;
ts = avf->streams[0]->internal->cur_dts;
end_ts = ts + avf->streams[0]->codecpar->frame_size;
if (avf->streams[0]->duration != AV_NOPTS_VALUE)
end_ts = FFMIN(avf->streams[0]->start_time + avf->streams[0]->duration,
@@ -1488,7 +1488,7 @@ static int sbg_read_seek2(AVFormatContext *avf, int stream_index,
return AVERROR(EINVAL);
if (stream_index < 0)
ts = av_rescale_q(ts, AV_TIME_BASE_Q, avf->streams[0]->time_base);
avf->streams[0]->cur_dts = ts;
avf->streams[0]->internal->cur_dts = ts;
return 0;
}


@@ -586,11 +586,11 @@ static int ism_write_packet(AVFormatContext *s, AVPacket *pkt)
int64_t end_dts = (c->nb_fragments + 1) * (int64_t) c->min_frag_duration;
int ret;
if (st->first_dts == AV_NOPTS_VALUE)
st->first_dts = pkt->dts;
if (st->internal->first_dts == AV_NOPTS_VALUE)
st->internal->first_dts = pkt->dts;
if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
av_compare_ts(pkt->dts - st->first_dts, st->time_base,
av_compare_ts(pkt->dts - st->internal->first_dts, st->time_base,
end_dts, AV_TIME_BASE_Q) >= 0 &&
pkt->flags & AV_PKT_FLAG_KEY && os->packets_written) {


@@ -302,7 +302,7 @@ static av_cold int tedcaptions_read_header(AVFormatContext *avf)
st->internal->probe_packets = 0;
st->start_time = 0;
st->duration = last->pts + last->duration;
st->cur_dts = 0;
st->internal->cur_dts = 0;
return 0;
}


@@ -842,12 +842,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->internal->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
// correct first time stamps to negative values
if (!is_relative(st->first_dts))
st->first_dts = wrap_timestamp(st, st->first_dts);
if (!is_relative(st->internal->first_dts))
st->internal->first_dts = wrap_timestamp(st, st->internal->first_dts);
if (!is_relative(st->start_time))
st->start_time = wrap_timestamp(st, st->start_time);
if (!is_relative(st->cur_dts))
st->cur_dts = wrap_timestamp(st, st->cur_dts);
if (!is_relative(st->internal->cur_dts))
st->internal->cur_dts = wrap_timestamp(st, st->internal->cur_dts);
}
pkt->dts = wrap_timestamp(st, pkt->dts);
@@ -1071,17 +1071,17 @@ static void update_initial_timestamps(AVFormatContext *s, int stream_index,
uint64_t shift;
if (st->first_dts != AV_NOPTS_VALUE ||
if (st->internal->first_dts != AV_NOPTS_VALUE ||
dts == AV_NOPTS_VALUE ||
st->cur_dts == AV_NOPTS_VALUE ||
st->cur_dts < INT_MIN + RELATIVE_TS_BASE ||
dts < INT_MIN + (st->cur_dts - RELATIVE_TS_BASE) ||
st->internal->cur_dts == AV_NOPTS_VALUE ||
st->internal->cur_dts < INT_MIN + RELATIVE_TS_BASE ||
dts < INT_MIN + (st->internal->cur_dts - RELATIVE_TS_BASE) ||
is_relative(dts))
return;
st->first_dts = dts - (st->cur_dts - RELATIVE_TS_BASE);
st->cur_dts = dts;
shift = (uint64_t)st->first_dts - RELATIVE_TS_BASE;
st->internal->first_dts = dts - (st->internal->cur_dts - RELATIVE_TS_BASE);
st->internal->cur_dts = dts;
shift = (uint64_t)st->internal->first_dts - RELATIVE_TS_BASE;
if (is_relative(pts))
pts += shift;
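
Until a real dts is known, libavformat advances cur_dts from an internal offset (RELATIVE_TS_BASE) so that relative and absolute timestamps cannot collide; once the first usable dts arrives, update_initial_timestamps() back-dates first_dts by however far cur_dts has already advanced and shifts any queued relative timestamps by the same amount. A small standalone sketch of that bookkeeping; the RELATIVE_TS_BASE value below is a placeholder, not libavformat's actual constant, and the overflow-safe unsigned arithmetic of the real code is omitted:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RELATIVE_TS_BASE 1000000000LL  /* placeholder offset for relative timestamps */

    int main(void)
    {
        /* Two 1800-tick frames were parsed before a real dts showed up,
         * so cur_dts sits 3600 ticks past the relative base. */
        int64_t cur_dts = RELATIVE_TS_BASE + 3600;
        int64_t dts     = 900000;                  /* first real dts from the container */

        int64_t first_dts = dts - (cur_dts - RELATIVE_TS_BASE);  /* 896400 */
        int64_t shift     = first_dts - RELATIVE_TS_BASE;

        /* A queued packet that was stamped relatively now lands in real time: */
        int64_t queued_pts = RELATIVE_TS_BASE + 1800;
        queued_pts += shift;                        /* -> 898200, i.e. first_dts + 1800 */

        printf("first_dts=%" PRId64 " queued_pts=%" PRId64 "\n", first_dts, queued_pts);
        return 0;
    }
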
@@ -1121,11 +1121,11 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st,
PacketList *pktl = s->internal->packet_buffer ? s->internal->packet_buffer : s->internal->parse_queue;
int64_t cur_dts = RELATIVE_TS_BASE;
if (st->first_dts != AV_NOPTS_VALUE) {
if (st->internal->first_dts != AV_NOPTS_VALUE) {
if (st->internal->update_initial_durations_done)
return;
st->internal->update_initial_durations_done = 1;
cur_dts = st->first_dts;
cur_dts = st->internal->first_dts;
for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
if (pktl->pkt.stream_index == stream_index) {
if (pktl->pkt.pts != pktl->pkt.dts ||
@@ -1135,18 +1135,18 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st,
cur_dts -= duration;
}
}
if (pktl && pktl->pkt.dts != st->first_dts) {
if (pktl && pktl->pkt.dts != st->internal->first_dts) {
av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %"PRId64") in the queue\n",
av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
av_ts2str(st->internal->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
return;
}
if (!pktl) {
av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->internal->first_dts));
return;
}
pktl = s->internal->packet_buffer ? s->internal->packet_buffer : s->internal->parse_queue;
st->first_dts = cur_dts;
} else if (st->cur_dts != RELATIVE_TS_BASE)
st->internal->first_dts = cur_dts;
} else if (st->internal->cur_dts != RELATIVE_TS_BASE)
return;
for (; pktl; pktl = get_next_pkt(s, st, pktl)) {
@@ -1155,7 +1155,7 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st,
if ((pktl->pkt.pts == pktl->pkt.dts ||
pktl->pkt.pts == AV_NOPTS_VALUE) &&
(pktl->pkt.dts == AV_NOPTS_VALUE ||
pktl->pkt.dts == st->first_dts ||
pktl->pkt.dts == st->internal->first_dts ||
pktl->pkt.dts == RELATIVE_TS_BASE) &&
!pktl->pkt.duration) {
pktl->pkt.dts = cur_dts;
@@ -1167,7 +1167,7 @@ static void update_initial_durations(AVFormatContext *s, AVStream *st,
cur_dts = pktl->pkt.dts + pktl->pkt.duration;
}
if (!pktl)
st->cur_dts = cur_dts;
st->internal->cur_dts = cur_dts;
}
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
@@ -1226,7 +1226,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
st->pts_wrap_bits < 63 && pkt->dts > INT64_MIN + (1LL << st->pts_wrap_bits) &&
pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
if (is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > st->cur_dts) {
if (is_relative(st->internal->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits - 1)) > st->internal->cur_dts) {
pkt->dts -= 1LL << st->pts_wrap_bits;
} else
pkt->pts += 1LL << st->pts_wrap_bits;
@@ -1279,7 +1279,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
if (s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG,
"IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%"PRId64" delay:%d onein_oneout:%d\n",
presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts),
presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->internal->cur_dts),
pkt->stream_index, pc, pkt->duration, delay, onein_oneout);
/* Interpolate PTS and DTS if they are not present. We skip H264
@@ -1293,18 +1293,18 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
pkt->dts = st->internal->last_IP_pts;
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
if (pkt->dts == AV_NOPTS_VALUE)
pkt->dts = st->cur_dts;
pkt->dts = st->internal->cur_dts;
/* This is tricky: the dts must be incremented by the duration
* of the frame we are displaying, i.e. the last I- or P-frame. */
if (st->internal->last_IP_duration == 0 && (uint64_t)pkt->duration <= INT32_MAX)
st->internal->last_IP_duration = pkt->duration;
if (pkt->dts != AV_NOPTS_VALUE)
st->cur_dts = av_sat_add64(pkt->dts, st->internal->last_IP_duration);
st->internal->cur_dts = av_sat_add64(pkt->dts, st->internal->last_IP_duration);
if (pkt->dts != AV_NOPTS_VALUE &&
pkt->pts == AV_NOPTS_VALUE &&
st->internal->last_IP_duration > 0 &&
((uint64_t)st->cur_dts - (uint64_t)next_dts + 1) <= 2 &&
((uint64_t)st->internal->cur_dts - (uint64_t)next_dts + 1) <= 2 &&
next_dts != next_pts &&
next_pts != AV_NOPTS_VALUE)
pkt->pts = next_dts;
@@ -1324,10 +1324,10 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
update_initial_timestamps(s, pkt->stream_index, pkt->pts,
pkt->pts, pkt);
if (pkt->pts == AV_NOPTS_VALUE)
pkt->pts = st->cur_dts;
pkt->pts = st->internal->cur_dts;
pkt->dts = pkt->pts;
if (pkt->pts != AV_NOPTS_VALUE && duration.num >= 0)
st->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
st->internal->cur_dts = av_add_stable(st->time_base, pkt->pts, duration, 1);
}
}
@@ -1343,12 +1343,12 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
if (!onein_oneout)
// This should happen on the first packet
update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
if (pkt->dts > st->cur_dts)
st->cur_dts = pkt->dts;
if (pkt->dts > st->internal->cur_dts)
st->internal->cur_dts = pkt->dts;
if (s->debug & FF_FDEBUG_TS)
av_log(s, AV_LOG_DEBUG, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s st:%d (%d)\n",
presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), st->index, st->id);
presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->internal->cur_dts), st->index, st->id);
/* update flags */
if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA || ff_is_intra_only(st->codecpar->codec_id))
@@ -1830,11 +1830,11 @@ void ff_read_frame_flush(AVFormatContext *s)
}
st->internal->last_IP_pts = AV_NOPTS_VALUE;
st->internal->last_dts_for_order_check = AV_NOPTS_VALUE;
if (st->first_dts == AV_NOPTS_VALUE)
st->cur_dts = RELATIVE_TS_BASE;
if (st->internal->first_dts == AV_NOPTS_VALUE)
st->internal->cur_dts = RELATIVE_TS_BASE;
else
/* We set the current DTS to an unspecified origin. */
st->cur_dts = AV_NOPTS_VALUE;
st->internal->cur_dts = AV_NOPTS_VALUE;
st->internal->probe_packets = s->max_probe_packets;
@@ -1855,7 +1855,7 @@ void avpriv_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timesta
for (i = 0; i < s->nb_streams; i++) {
AVStream *st = s->streams[i];
st->cur_dts =
st->internal->cur_dts =
av_rescale(timestamp,
st->time_base.den * (int64_t) ref_st->time_base.num,
st->time_base.num * (int64_t) ref_st->time_base.den);
@@ -2736,7 +2736,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
for (i = 0; i < ic->nb_streams; i++) {
st = ic->streams[i];
if (st->start_time == AV_NOPTS_VALUE &&
st->first_dts == AV_NOPTS_VALUE &&
st->internal->first_dts == AV_NOPTS_VALUE &&
st->codecpar->codec_type != AVMEDIA_TYPE_UNKNOWN)
av_log(ic, AV_LOG_WARNING,
"start time for stream %d is not set in estimate_timings_from_pts\n", i);
@@ -2777,7 +2777,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
st = ic->streams[pkt->stream_index];
if (pkt->pts != AV_NOPTS_VALUE &&
(st->start_time != AV_NOPTS_VALUE ||
st->first_dts != AV_NOPTS_VALUE)) {
st->internal->first_dts != AV_NOPTS_VALUE)) {
if (pkt->duration == 0) {
ff_compute_frame_duration(ic, &num, &den, st, st->internal->parser, pkt);
if (den && num) {
@@ -2792,7 +2792,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
if (st->start_time != AV_NOPTS_VALUE)
duration -= st->start_time;
else
duration -= st->first_dts;
duration -= st->internal->first_dts;
if (duration > 0) {
if (st->duration == AV_NOPTS_VALUE || st->internal->info->last_duration<= 0 ||
(st->duration < duration && FFABS(duration - st->internal->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
@@ -2829,7 +2829,7 @@ static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
switch (st->codecpar->codec_type) {
case AVMEDIA_TYPE_VIDEO:
case AVMEDIA_TYPE_AUDIO:
if (st->start_time != AV_NOPTS_VALUE || st->first_dts != AV_NOPTS_VALUE) {
if (st->start_time != AV_NOPTS_VALUE || st->internal->first_dts != AV_NOPTS_VALUE) {
av_log(ic, AV_LOG_WARNING, "stream %d : no PTS found at end of file, duration not set\n", i);
} else
av_log(ic, AV_LOG_WARNING, "stream %d : no TS found at start of file, duration not set\n", i);
@@ -2844,7 +2844,7 @@ skip_duration_calc:
int j;
st = ic->streams[i];
st->cur_dts = st->first_dts;
st->internal->cur_dts = st->internal->first_dts;
st->internal->last_IP_pts = AV_NOPTS_VALUE;
st->internal->last_dts_for_order_check = AV_NOPTS_VALUE;
for (j = 0; j < MAX_REORDER_DELAY + 1; j++)
@@ -3699,7 +3699,7 @@ int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
st->internal->extract_extradata.bsf) &&
extract_extradata_check(st))
break;
if (st->first_dts == AV_NOPTS_VALUE &&
if (st->internal->first_dts == AV_NOPTS_VALUE &&
!(ic->iformat->flags & AVFMT_NOTIMESTAMPS) &&
st->internal->codec_info_nb_frames < ((st->disposition & AV_DISPOSITION_ATTACHED_PIC) ? 1 : ic->max_ts_probe) &&
(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
@@ -4414,15 +4414,15 @@ AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
* but durations get some timestamps, formats with some unknown
* timestamps have their first few packets buffered and the
* timestamps corrected before they are returned to the user */
st->cur_dts = RELATIVE_TS_BASE;
st->internal->cur_dts = RELATIVE_TS_BASE;
} else {
st->cur_dts = AV_NOPTS_VALUE;
st->internal->cur_dts = AV_NOPTS_VALUE;
}
st->index = s->nb_streams;
st->start_time = AV_NOPTS_VALUE;
st->duration = AV_NOPTS_VALUE;
st->first_dts = AV_NOPTS_VALUE;
st->internal->first_dts = AV_NOPTS_VALUE;
st->internal->probe_packets = s->max_probe_packets;
st->internal->pts_wrap_reference = AV_NOPTS_VALUE;
st->internal->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;


@@ -277,7 +277,7 @@ static int vqf_read_seek(AVFormatContext *s,
AV_ROUND_DOWN : AV_ROUND_UP);
pos *= c->frame_bit_len;
st->cur_dts = av_rescale(pos, st->time_base.den,
st->internal->cur_dts = av_rescale(pos, st->time_base.den,
st->codecpar->bit_rate * (int64_t)st->time_base.num);
if ((ret = avio_seek(s->pb, ((pos-7) >> 3) + s->internal->data_offset, SEEK_SET)) < 0)


@@ -702,8 +702,8 @@ static int wav_read_packet(AVFormatContext *s, AVPacket *pkt)
int64_t audio_dts, video_dts;
AVStream *vst = wav->vst;
smv_retry:
audio_dts = (int32_t)st->cur_dts;
video_dts = (int32_t)vst->cur_dts;
audio_dts = (int32_t)st->internal->cur_dts;
video_dts = (int32_t)vst->internal->cur_dts;
if (audio_dts != AV_NOPTS_VALUE && video_dts != AV_NOPTS_VALUE) {
/*We always return a video frame first to get the pixel format first*/