From 6e554a30893150793c2638e3689cf208ffc8e375 Mon Sep 17 00:00:00 2001
From: Dale Curtis <dalecurtis@chromium.org>
Date: Sat, 2 Apr 2022 05:13:53 +0000
Subject: [PATCH] Roll src/third_party/ffmpeg/ 574c39cce..32b2d1d526 (1125
 commits)

https://chromium.googlesource.com/chromium/third_party/ffmpeg.git/+log/574c39cce323..32b2d1d526

Created with:
  roll-dep src/third_party/ffmpeg

Fixed: 1293918
Cq-Include-Trybots: luci.chromium.try:mac_chromium_asan_rel_ng,linux_chromium_asan_rel_ng,linux_chromium_chromeos_asan_rel_ng
Change-Id: I41945d0f963e3d1f65940067bac22f63b68e37d2
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/3565647
Auto-Submit: Dale Curtis <dalecurtis@chromium.org>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Commit-Queue: Dale Curtis <dalecurtis@chromium.org>
Cr-Commit-Position: refs/heads/main@{#988253}
---
 .../clear_key_cdm/ffmpeg_cdm_audio_decoder.cc | 29 ++++++++++---------
 media/ffmpeg/ffmpeg_common.cc                 | 11 +++----
 media/filters/audio_file_reader.cc            |  9 +++---
 media/filters/audio_file_reader_unittest.cc   |  6 ++--
 .../filters/audio_video_metadata_extractor.cc | 11 +++++--
 .../filters/ffmpeg_aac_bitstream_converter.cc |  7 +++--
 ...ffmpeg_aac_bitstream_converter_unittest.cc |  2 +-
 media/filters/ffmpeg_audio_decoder.cc         | 13 +++++----
8 files changed, 51 insertions(+), 37 deletions(-)
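All of the hunks in this commit follow from the same FFmpeg API change: the deprecated AVCodecContext/AVFrame fields channels and channel_layout are replaced by the AVChannelLayout member ch_layout, so channel counts are read from ch_layout.nb_channels and layout masks from ch_layout.u.mask. The sketch below is for reference only and is not part of the patch; it assumes FFmpeg 5.1+ headers, and the helper name is invented.

    #include <cstdint>
    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavutil/channel_layout.h>
    }

    // Reference mapping between the removed fields and the new ch_layout.
    static void ConfigureStereoLayout(AVCodecContext* ctx) {
      // Describe the layout once via AVChannelLayout (here: default stereo).
      av_channel_layout_default(&ctx->ch_layout, /*nb_channels=*/2);

      // ctx->channels        -> ctx->ch_layout.nb_channels
      // ctx->channel_layout  -> ctx->ch_layout.u.mask
      int channels = ctx->ch_layout.nb_channels;
      uint64_t mask = ctx->ch_layout.u.mask;
      (void)channels;
      (void)mask;
    }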
diff --git a/media/cdm/library_cdm/clear_key_cdm/ffmpeg_cdm_audio_decoder.cc b/media/cdm/library_cdm/clear_key_cdm/ffmpeg_cdm_audio_decoder.cc
index e4fc3f460e2..9b1ad9f7675 100644
--- a/media/cdm/library_cdm/clear_key_cdm/ffmpeg_cdm_audio_decoder.cc
+++ b/media/cdm/library_cdm/clear_key_cdm/ffmpeg_cdm_audio_decoder.cc
@@ -74,7 +74,7 @@ void CdmAudioDecoderConfigToAVCodecContext(
codec_context->sample_fmt = AV_SAMPLE_FMT_NONE;
}

- codec_context->channels = config.channel_count;
+ codec_context->ch_layout.nb_channels = config.channel_count;
codec_context->sample_rate = config.samples_per_second;

if (config.extra_data) {
@@ -124,8 +124,8 @@ void CopySamples(cdm::AudioFormat cdm_format,
case cdm::kAudioFormatPlanarS16:
case cdm::kAudioFormatPlanarF32: {
const int decoded_size_per_channel =
- decoded_audio_size / av_frame.channels;
- for (int i = 0; i < av_frame.channels; ++i) {
+ decoded_audio_size / av_frame.ch_layout.nb_channels;
+ for (int i = 0; i < av_frame.ch_layout.nb_channels; ++i) {
memcpy(output_buffer, av_frame.extended_data[i],
decoded_size_per_channel);
output_buffer += decoded_size_per_channel;
@@ -185,13 +185,14 @@ bool FFmpegCdmAudioDecoder::Initialize(
// Success!
decoding_loop_ = std::make_unique<FFmpegDecodingLoop>(codec_context_.get());
samples_per_second_ = config.samples_per_second;
- bytes_per_frame_ = codec_context_->channels * config.bits_per_channel / 8;
+ bytes_per_frame_ =
+ codec_context_->ch_layout.nb_channels * config.bits_per_channel / 8;
output_timestamp_helper_ =
std::make_unique<AudioTimestampHelper>(config.samples_per_second);
is_initialized_ = true;

// Store initial values to guard against midstream configuration changes.
- channels_ = codec_context_->channels;
+ channels_ = codec_context_->ch_layout.nb_channels;
av_sample_format_ = codec_context_->sample_fmt;

return true;
@@ -291,18 +292,19 @@ cdm::Status FFmpegCdmAudioDecoder::DecodeBuffer(
for (auto& frame : audio_frames) {
int decoded_audio_size = 0;
if (frame->sample_rate != samples_per_second_ ||
- frame->channels != channels_ || frame->format != av_sample_format_) {
+ frame->ch_layout.nb_channels != channels_ ||
+ frame->format != av_sample_format_) {
DLOG(ERROR) << "Unsupported midstream configuration change!"
<< " Sample Rate: " << frame->sample_rate << " vs "
<< samples_per_second_
<< ", Channels: " << frame->ch_layout.nb_channels << " vs "
<< channels_ << ", Sample Format: " << frame->format << " vs "
<< av_sample_format_;
return cdm::kDecodeError;
}

decoded_audio_size = av_samples_get_buffer_size(
- nullptr, codec_context_->channels, frame->nb_samples,
+ nullptr, codec_context_->ch_layout.nb_channels, frame->nb_samples,
codec_context_->sample_fmt, 1);
if (!decoded_audio_size)
continue;
@@ -320,9 +323,9 @@ bool FFmpegCdmAudioDecoder::OnNewFrame(
size_t* total_size,
std::vector<std::unique_ptr<AVFrame, ScopedPtrAVFreeFrame>>* audio_frames,
AVFrame* frame) {
- *total_size += av_samples_get_buffer_size(
- nullptr, codec_context_->channels, frame->nb_samples,
- codec_context_->sample_fmt, 1);
+ *total_size += av_samples_get_buffer_size(
+ nullptr, codec_context_->ch_layout.nb_channels, frame->nb_samples,
+ codec_context_->sample_fmt, 1);
audio_frames->emplace_back(av_frame_clone(frame));
return true;
}
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index 87ca8969626..76f03d6608e 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -345,10 +345,11 @@ bool AVCodecContextToAudioDecoderConfig(const AVCodecContext* codec_context,
codec_context->sample_fmt, codec_context->codec_id);

ChannelLayout channel_layout =
- codec_context->channels > 8
+ codec_context->ch_layout.nb_channels > 8
? CHANNEL_LAYOUT_DISCRETE
- : ChannelLayoutToChromeChannelLayout(codec_context->channel_layout,
- codec_context->channels);
+ : ChannelLayoutToChromeChannelLayout(
+ codec_context->ch_layout.u.mask,
+ codec_context->ch_layout.nb_channels);

int sample_rate = codec_context->sample_rate;
switch (codec) {
@@ -401,7 +402,7 @@ bool AVCodecContextToAudioDecoderConfig(const AVCodecContext* codec_context,
extra_data, encryption_scheme, seek_preroll,
codec_context->delay);
if (channel_layout == CHANNEL_LAYOUT_DISCRETE)
- config->SetChannelsForDiscrete(codec_context->channels);
+ config->SetChannelsForDiscrete(codec_context->ch_layout.nb_channels);

#if BUILDFLAG(ENABLE_PLATFORM_AC3_EAC3_AUDIO)
// These are bitstream formats unknown to ffmpeg, so they don't have
@@ -470,7 +471,7 @@ void AudioDecoderConfigToAVCodecContext(const AudioDecoderConfig& config,

// TODO(scherkus): should we set |channel_layout|? I'm not sure if FFmpeg uses
// said information to decode.
- codec_context->channels = config.channels();
+ codec_context->ch_layout.nb_channels = config.channels();
codec_context->sample_rate = config.samples_per_second();

if (config.extra_data().empty()) {
diff --git a/media/filters/audio_file_reader.cc b/media/filters/audio_file_reader.cc
index 5f257bdfaa6..e1be5aa9a5b 100644
--- a/media/filters/audio_file_reader.cc
+++ b/media/filters/audio_file_reader.cc
@@ -113,14 +113,15 @@ bool AudioFileReader::OpenDecoder() {

// Verify the channel layout is supported by Chrome. Acts as a sanity check
// against invalid files. See http://crbug.com/171962
- if (ChannelLayoutToChromeChannelLayout(codec_context_->channel_layout,
- codec_context_->channels) ==
+ if (ChannelLayoutToChromeChannelLayout(
+ codec_context_->ch_layout.u.mask,
+ codec_context_->ch_layout.nb_channels) ==
CHANNEL_LAYOUT_UNSUPPORTED) {
return false;
}

// Store initial values to guard against midstream configuration changes.
- channels_ = codec_context_->channels;
+ channels_ = codec_context_->ch_layout.nb_channels;
audio_codec_ = CodecIDToAudioCodec(codec_context_->codec_id);
sample_rate_ = codec_context_->sample_rate;
av_sample_format_ = codec_context_->sample_fmt;
@@ -223,7 +224,7 @@ bool AudioFileReader::OnNewFrame(
if (frames_read < 0)
return false;

- const int channels = frame->channels;
+ const int channels = frame->ch_layout.nb_channels;
if (frame->sample_rate != sample_rate_ || channels != channels_ ||
frame->format != av_sample_format_) {
DLOG(ERROR) << "Unsupported midstream configuration change!"
diff --git a/media/filters/ffmpeg_aac_bitstream_converter.cc b/media/filters/ffmpeg_aac_bitstream_converter.cc
index 6f231c85729..ca5e5fb927d 100644
--- a/media/filters/ffmpeg_aac_bitstream_converter.cc
+++ b/media/filters/ffmpeg_aac_bitstream_converter.cc
@@ -195,14 +195,15 @@ bool FFmpegAACBitstreamConverter::ConvertPacket(AVPacket* packet) {
if (!header_generated_ || codec_ != stream_codec_parameters_->codec_id ||
audio_profile_ != stream_codec_parameters_->profile ||
sample_rate_index_ != sample_rate_index ||
- channel_configuration_ != stream_codec_parameters_->channels ||
+ channel_configuration_ !=
+ stream_codec_parameters_->ch_layout.nb_channels ||
frame_length_ != header_plus_packet_size) {
header_generated_ =
GenerateAdtsHeader(stream_codec_parameters_->codec_id,
0, // layer
stream_codec_parameters_->profile, sample_rate_index,
0, // private stream
- stream_codec_parameters_->channels,
+ stream_codec_parameters_->ch_layout.nb_channels,
0, // originality
0, // home
0, // copyrighted_stream
@@ -214,7 +215,7 @@ bool FFmpegAACBitstreamConverter::ConvertPacket(AVPacket* packet) {
codec_ = stream_codec_parameters_->codec_id;
audio_profile_ = stream_codec_parameters_->profile;
sample_rate_index_ = sample_rate_index;
- channel_configuration_ = stream_codec_parameters_->channels;
+ channel_configuration_ = stream_codec_parameters_->ch_layout.nb_channels;
frame_length_ = header_plus_packet_size;
}

diff --git a/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc b/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
index 1fd4c5ccd7d..f59bcd8fdaf 100644
--- a/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
+++ b/media/filters/ffmpeg_aac_bitstream_converter_unittest.cc
@@ -34,7 +34,7 @@ class FFmpegAACBitstreamConverterTest : public testing::Test {
memset(&test_parameters_, 0, sizeof(AVCodecParameters));
test_parameters_.codec_id = AV_CODEC_ID_AAC;
test_parameters_.profile = FF_PROFILE_AAC_MAIN;
- test_parameters_.channels = 2;
+ test_parameters_.ch_layout.nb_channels = 2;
test_parameters_.extradata = extradata_header_;
test_parameters_.extradata_size = sizeof(extradata_header_);
}
diff --git a/media/filters/ffmpeg_audio_decoder.cc b/media/filters/ffmpeg_audio_decoder.cc
index 6a56c675f7d..4615fdeb3fb 100644
--- a/media/filters/ffmpeg_audio_decoder.cc
+++ b/media/filters/ffmpeg_audio_decoder.cc
@@ -28,7 +28,7 @@ namespace media {

// Return the number of channels from the data in |frame|.
static inline int DetermineChannels(AVFrame* frame) {
- return frame->channels;
+ return frame->ch_layout.nb_channels;
}

// Called by FFmpeg's allocation routine to allocate a buffer. Uses
@@ -231,7 +231,7 @@ bool FFmpegAudioDecoder::OnNewFrame(const DecoderBuffer& buffer,
// Translate unsupported into discrete layouts for discrete configurations;
// ffmpeg does not have a labeled discrete configuration internally.
ChannelLayout channel_layout = ChannelLayoutToChromeChannelLayout(
- codec_context_->channel_layout, codec_context_->channels);
+ codec_context_->ch_layout.u.mask, codec_context_->ch_layout.nb_channels);
if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED &&
config_.channel_layout() == CHANNEL_LAYOUT_DISCRETE) {
channel_layout = CHANNEL_LAYOUT_DISCRETE;
@@ -348,11 +348,11 @@ bool FFmpegAudioDecoder::ConfigureDecoder(const AudioDecoderConfig& config) {
// Success!
av_sample_format_ = codec_context_->sample_fmt;

- if (codec_context_->channels != config.channels()) {
+ if (codec_context_->ch_layout.nb_channels != config.channels()) {
MEDIA_LOG(ERROR, media_log_)
<< "Audio configuration specified " << config.channels()
<< " channels, but FFmpeg thinks the file contains "
- << codec_context_->channels << " channels";
+ << codec_context_->ch_layout.nb_channels << " channels";
ReleaseFFmpegResources();
state_ = DecoderState::kUninitialized;
return false;
@@ -403,7 +403,7 @@ int FFmpegAudioDecoder::GetAudioBuffer(struct AVCodecContext* s,
if (frame->nb_samples <= 0)
return AVERROR(EINVAL);

- if (s->channels != channels) {
+ if (s->ch_layout.nb_channels != channels) {
DLOG(ERROR) << "AVCodecContext and AVFrame disagree on channel count.";
return AVERROR(EINVAL);
}
@@ -436,7 +436,8 @@ int FFmpegAudioDecoder::GetAudioBuffer(struct AVCodecContext* s,
ChannelLayout channel_layout =
config_.channel_layout() == CHANNEL_LAYOUT_DISCRETE
? CHANNEL_LAYOUT_DISCRETE
- : ChannelLayoutToChromeChannelLayout(s->channel_layout, s->channels);
+ : ChannelLayoutToChromeChannelLayout(s->ch_layout.u.mask,
+ s->ch_layout.nb_channels);

if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) {
DLOG(ERROR) << "Unsupported channel layout.";

commit 62274859104bd828373ae406aa9309e610449ac5
Author: Ted Meyer <tmathmeyer@chromium.org>
Date: Fri Mar 22 19:56:55 2024 +0000

Replace deprecated use of AVCodecContext::reordered_opaque

We can use the AV_CODEC_FLAG_COPY_OPAQUE flag on the codec context
now to trigger timestamp propagation.

Bug: 330573128
Change-Id: I6bc57241a35ab5283742aad8d42acb4dc5e85858
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/5384308
Commit-Queue: Ted (Chromium) Meyer <tmathmeyer@chromium.org>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Cr-Commit-Position: refs/heads/main@{#1277051}
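The mechanism the commit describes works as follows: when AV_CODEC_FLAG_COPY_OPAQUE is set on the codec context, FFmpeg carries AVPacket::opaque through decoding and attaches it to the matching output AVFrame, surviving frame reordering and threaded decoding. A minimal sketch of that flow, assuming FFmpeg 6.0 or newer (it is not part of the patch, and the function name is invented):

    #include <cstddef>
    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavutil/frame.h>
    }

    // Tag each input packet with a small key and read it back from the
    // decoded frame. |ctx| must have AV_CODEC_FLAG_COPY_OPAQUE set in
    // ctx->flags before avcodec_open2() for the propagation to happen.
    void DecodeTaggedPacket(AVCodecContext* ctx, AVPacket* pkt, size_t key) {
      pkt->opaque = reinterpret_cast<void*>(key);
      if (avcodec_send_packet(ctx, pkt) < 0)
        return;

      AVFrame* frame = av_frame_alloc();
      if (!frame)
        return;
      while (avcodec_receive_frame(ctx, frame) == 0) {
        // The key attached to the packet that produced this frame.
        size_t frame_key = reinterpret_cast<size_t>(frame->opaque);
        (void)frame_key;  // Use it to look up the real timestamp in a side map.
        av_frame_unref(frame);
      }
      av_frame_free(&frame);
    }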
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index bd75477feeabb..8a658a58caac5 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -134,7 +134,7 @@ bool FFmpegVideoDecoder::IsCodecSupported(VideoCodec codec) {
}

FFmpegVideoDecoder::FFmpegVideoDecoder(MediaLog* media_log)
- : media_log_(media_log) {
+ : media_log_(media_log), timestamp_map_(128) {
DVLOG(1) << __func__;
DETACH_FROM_SEQUENCE(sequence_checker_);
}
@@ -363,8 +363,10 @@ bool FFmpegVideoDecoder::FFmpegDecode(const DecoderBuffer& buffer) {
DCHECK(packet->data);
DCHECK_GT(packet->size, 0);

- // Let FFmpeg handle presentation timestamp reordering.
- codec_context_->reordered_opaque = buffer.timestamp().InMicroseconds();
+ const int64_t timestamp = buffer.timestamp().InMicroseconds();
+ const TimestampId timestamp_id = timestamp_id_generator_.GenerateNextId();
+ timestamp_map_.Put(std::make_pair(timestamp_id, timestamp));
+ packet->opaque = reinterpret_cast<void*>(timestamp_id.GetUnsafeValue());
}
FFmpegDecodingLoop::DecodeStatus decode_status = decoding_loop_->DecodePacket(
packet, base::BindRepeating(&FFmpegVideoDecoder::OnNewFrame,
@@ -423,7 +425,12 @@ bool FFmpegVideoDecoder::OnNewFrame(AVFrame* frame) {
}
gfx::Size natural_size = aspect_ratio.GetNaturalSize(visible_rect);

- const auto pts = base::Microseconds(frame->reordered_opaque);
+ const auto ts_id = TimestampId(reinterpret_cast<size_t>(frame->opaque));
+ const auto ts_lookup = timestamp_map_.Get(ts_id);
+ if (ts_lookup == timestamp_map_.end()) {
+ return false;
+ }
+ const auto pts = base::Microseconds(std::get<1>(*ts_lookup));
auto video_frame = VideoFrame::WrapExternalDataWithLayout(
opaque->layout, visible_rect, natural_size, opaque->data, opaque->size,
pts);
@@ -498,8 +505,10 @@ bool FFmpegVideoDecoder::ConfigureDecoder(const VideoDecoderConfig& config,
codec_context_->thread_count = GetFFmpegVideoDecoderThreadCount(config);
codec_context_->thread_type =
FF_THREAD_SLICE | (low_delay ? 0 : FF_THREAD_FRAME);
+
codec_context_->opaque = this;
codec_context_->get_buffer2 = GetVideoBufferImpl;
+ codec_context_->flags |= AV_CODEC_FLAG_COPY_OPAQUE;

if (base::FeatureList::IsEnabled(kFFmpegAllowLists)) {
// Note: FFmpeg will try to free this string, so we must duplicate it.
diff --git a/media/filters/ffmpeg_video_decoder.h b/media/filters/ffmpeg_video_decoder.h
index d02cb89c3ddf7..0a2de1c623fff 100644
--- a/media/filters/ffmpeg_video_decoder.h
+++ b/media/filters/ffmpeg_video_decoder.h
@@ -7,10 +7,12 @@

#include <memory>

+#include "base/containers/lru_cache.h"
#include "base/functional/callback.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/scoped_refptr.h"
#include "base/sequence_checker.h"
+#include "base/types/id_type.h"
#include "media/base/supported_video_decoder_config.h"
#include "media/base/video_decoder.h"
#include "media/base/video_decoder_config.h"
@@ -87,6 +89,20 @@ class MEDIA_EXPORT FFmpegVideoDecoder : public VideoDecoder {
// FFmpeg structures owned by this object.
std::unique_ptr<AVCodecContext, ScopedPtrAVFreeContext> codec_context_;

+ // The gist here is that timestamps need to be 64 bits to store microsecond
+ // precision. A 32 bit integer would overflow at ~35 minutes at this level of
+ // precision. We can't cast the timestamp to the void ptr object used by the
+ // opaque field in ffmpeg then, because it would lose data on a 32 bit build.
+ // However, we don't actually have 2^31 timestamped frames in a single
+ // playback, so it's fine to use the 32 bit value as a key in a map which
+ // contains the actual timestamps. Additionally, we've in the past set 128
+ // outstanding frames for re-ordering as a limit for cross-thread decoding
+ // tasks, so we'll do that here too with the LRU cache.
+ using TimestampId = base::IdType<int64_t, size_t, 0>;
+
+ TimestampId::Generator timestamp_id_generator_;
+ base::LRUCache<TimestampId, int64_t> timestamp_map_;
+
VideoDecoderConfig config_;
scoped_refptr<FrameBufferPool> frame_pool_;
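The comment in the hunk above motivates the indirection; in practice the pattern is just an id generator plus a bounded LRU map. The usage sketch below only mirrors the calls the diff itself makes (base::IdType, base::LRUCache) and is not part of the patch; the example values are arbitrary.

    #include <cstdint>
    #include <utility>

    #include "base/containers/lru_cache.h"
    #include "base/types/id_type.h"

    using TimestampId = base::IdType<int64_t, size_t, 0>;

    void TimestampRoundTrip() {
      TimestampId::Generator generator;
      base::LRUCache<TimestampId, int64_t> timestamp_map(128);

      // Sending side: store the 64-bit pts under a small key that still fits
      // in a void* on 32-bit builds, and hand the key to FFmpeg as opaque.
      const int64_t pts_us = 33366;  // arbitrary example value
      const TimestampId id = generator.GenerateNextId();
      timestamp_map.Put(std::make_pair(id, pts_us));
      void* opaque = reinterpret_cast<void*>(id.GetUnsafeValue());

      // Receiving side: rebuild the key from frame->opaque and look it up.
      const TimestampId key(reinterpret_cast<size_t>(opaque));
      const auto it = timestamp_map.Get(key);
      if (it != timestamp_map.end()) {
        const int64_t recovered_pts_us = std::get<1>(*it);
        (void)recovered_pts_us;
      }
    }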
diff --git a/media/ffmpeg/ffmpeg_common.cc b/media/ffmpeg/ffmpeg_common.cc
index 3331581a6fee6..69539fd6594ec 100644
--- a/media/ffmpeg/ffmpeg_common.cc
+++ b/media/ffmpeg/ffmpeg_common.cc
@@ -404,7 +404,9 @@ bool AVCodecContextToAudioDecoderConfig(const AVCodecContext* codec_context,

// TODO(dalecurtis): Just use the profile from the codec context if ffmpeg
// ever starts supporting xHE-AAC.
- if (codec_context->profile == FF_PROFILE_UNKNOWN) {
+ constexpr uint8_t kXHEAAc = 41;
+ if (codec_context->profile == FF_PROFILE_UNKNOWN ||
+ codec_context->profile == kXHEAAc) {
// Errors aren't fatal here, so just drop any MediaLog messages.
NullMediaLog media_log;
mp4::AAC aac_parser;
diff --git a/media/ffmpeg/ffmpeg_regression_tests.cc b/media/ffmpeg/ffmpeg_regression_tests.cc
index 05dcb1cd62c75..866f446698947 100644
--- a/media/ffmpeg/ffmpeg_regression_tests.cc
+++ b/media/ffmpeg/ffmpeg_regression_tests.cc
@@ -90,16 +90,16 @@ FFMPEG_TEST_CASE(Cr62127,
PIPELINE_ERROR_DECODE,
PIPELINE_ERROR_DECODE);
FFMPEG_TEST_CASE(Cr93620, "security/93620.ogg", PIPELINE_OK, PIPELINE_OK);
-FFMPEG_TEST_CASE(Cr100492,
- "security/100492.webm",
- DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED);
+FFMPEG_TEST_CASE(Cr100492, "security/100492.webm", PIPELINE_OK, PIPELINE_OK);
FFMPEG_TEST_CASE(Cr100543, "security/100543.webm", PIPELINE_OK, PIPELINE_OK);
FFMPEG_TEST_CASE(Cr101458,
"security/101458.webm",
PIPELINE_ERROR_DECODE,
PIPELINE_ERROR_DECODE);
-FFMPEG_TEST_CASE(Cr108416, "security/108416.webm", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr108416,
+ "security/108416.webm",
+ PIPELINE_ERROR_DECODE,
+ PIPELINE_ERROR_DECODE);
FFMPEG_TEST_CASE(Cr110849,
"security/110849.mkv",
DEMUXER_ERROR_COULD_NOT_OPEN,
@@ -154,7 +154,10 @@ FFMPEG_TEST_CASE(Cr234630b,
"security/234630b.mov",
DEMUXER_ERROR_NO_SUPPORTED_STREAMS,
DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
-FFMPEG_TEST_CASE(Cr242786, "security/242786.webm", PIPELINE_OK, PIPELINE_OK);
+FFMPEG_TEST_CASE(Cr242786,
+ "security/242786.webm",
+ PIPELINE_OK,
+ PIPELINE_ERROR_DECODE);
// Test for out-of-bounds access with slightly corrupt file (detection logic
// thinks it's a MONO file, but actually contains STEREO audio).
FFMPEG_TEST_CASE(Cr275590,
@@ -372,8 +375,8 @@ FFMPEG_TEST_CASE(WEBM_2,
DEMUXER_ERROR_NO_SUPPORTED_STREAMS);
FFMPEG_TEST_CASE(WEBM_4,
"security/out.webm.68798.1929",
- DECODER_ERROR_NOT_SUPPORTED,
- DECODER_ERROR_NOT_SUPPORTED);
+ PIPELINE_OK,
+ PIPELINE_OK);
FFMPEG_TEST_CASE(WEBM_5, "frame_size_change.webm", PIPELINE_OK, PIPELINE_OK);

// General MKV test cases.
diff --git a/media/filters/audio_decoder_unittest.cc b/media/filters/audio_decoder_unittest.cc
index a31823cfe3b58..e43f408b79e5c 100644
--- a/media/filters/audio_decoder_unittest.cc
+++ b/media/filters/audio_decoder_unittest.cc
@@ -484,7 +484,7 @@ constexpr TestParams kXheAacTestParams[] = {
}},
0,
29400,
- CHANNEL_LAYOUT_MONO,
+ CHANNEL_LAYOUT_UNSUPPORTED,
AudioCodecProfile::kXHE_AAC},
#endif
{AudioCodec::kAAC,
diff --git a/media/filters/audio_file_reader_unittest.cc b/media/filters/audio_file_reader_unittest.cc
index c0cc568d63019..edf9470f2f8b3 100644
--- a/media/filters/audio_file_reader_unittest.cc
+++ b/media/filters/audio_file_reader_unittest.cc
@@ -62,15 +62,14 @@ class AudioFileReaderTest : public testing::Test {
// Verify packets are consistent across demuxer runs. Reads the first few
// packets and then seeks back to the start timestamp and verifies that the
// hashes match on the packets just read.
- void VerifyPackets() {
- const int kReads = 3;
+ void VerifyPackets(int packet_reads) {
const int kTestPasses = 2;

AVPacket packet;
base::TimeDelta start_timestamp;
std::vector<std::string> packet_md5_hashes_;
for (int i = 0; i < kTestPasses; ++i) {
- for (int j = 0; j < kReads; ++j) {
+ for (int j = 0; j < packet_reads; ++j) {
ASSERT_TRUE(reader_->ReadPacketForTesting(&packet));

// On the first pass save the MD5 hash of each packet, on subsequent
@@ -99,7 +98,8 @@ class AudioFileReaderTest : public testing::Test {
int sample_rate,
base::TimeDelta duration,
int frames,
- int expected_frames) {
+ int expected_frames,
+ int packet_reads = 3) {
Initialize(fn);
ASSERT_TRUE(reader_->Open());
EXPECT_EQ(channels, reader_->channels());
@@ -113,7 +113,7 @@ class AudioFileReaderTest : public testing::Test {
EXPECT_EQ(reader_->HasKnownDuration(), false);
}
if (!packet_verification_disabled_)
- ASSERT_NO_FATAL_FAILURE(VerifyPackets());
+ ASSERT_NO_FATAL_FAILURE(VerifyPackets(packet_reads));
ReadAndVerify(hash, expected_frames);
}

@@ -220,7 +220,7 @@ TEST_F(AudioFileReaderTest, AAC_ADTS) {
}

TEST_F(AudioFileReaderTest, MidStreamConfigChangesFail) {
- RunTestFailingDecode("midstream_config_change.mp3", 42624);
+ RunTestFailingDecode("midstream_config_change.mp3", 0);
}
#endif

@@ -230,7 +230,7 @@ TEST_F(AudioFileReaderTest, VorbisInvalidChannelLayout) {

TEST_F(AudioFileReaderTest, WaveValidFourChannelLayout) {
RunTest("4ch.wav", "131.71,38.02,130.31,44.89,135.98,42.52,", 4, 44100,
- base::Microseconds(100001), 4411, 4410);
+ base::Microseconds(100001), 4411, 4410, /*packet_reads=*/2);
}

TEST_F(AudioFileReaderTest, ReadPartialMP3) {
diff --git a/media/filters/ffmpeg_video_decoder.cc b/media/filters/ffmpeg_video_decoder.cc
index 8a658a58caac5..9d6ed8aeb5c48 100644
--- a/media/filters/ffmpeg_video_decoder.cc
+++ b/media/filters/ffmpeg_video_decoder.cc
@@ -213,10 +213,6 @@ int FFmpegVideoDecoder::GetVideoBuffer(struct AVCodecContext* codec_context,
frame->linesize[plane] = layout->planes()[plane].stride;
}

- // This seems unsafe, given threaded decoding. However, `reordered_opaque` is
- // also going away upstream, so we need a whole new mechanism either way.
- frame->reordered_opaque = codec_context->reordered_opaque;
-
// This will be freed by `ReleaseVideoBufferImpl`.
auto* opaque = new OpaqueData(fb_priv, frame_pool_, data, allocation_size,
std::move(*layout));
diff --git a/media/filters/audio_file_reader.cc b/media/filters/audio_file_reader.cc
index e1be5aa9a5b13..951c003956fb5 100644
--- a/media/filters/audio_file_reader.cc
+++ b/media/filters/audio_file_reader.cc
@@ -243,18 +243,10 @@ bool AudioFileReader::OnNewFrame(
// silence from being output. In the case where we are also discarding some
// portion of the packet (as indicated by a negative pts), we further want to
// adjust the duration downward by however much exists before zero.
-#if BUILDFLAG(USE_SYSTEM_FFMPEG)
- if (audio_codec_ == AudioCodec::kAAC && frame->pkt_duration) {
-#else
if (audio_codec_ == AudioCodec::kAAC && frame->duration) {
-#endif // BUILDFLAG(USE_SYSTEM_FFMPEG)
const base::TimeDelta pkt_duration = ConvertFromTimeBase(
glue_->format_context()->streams[stream_index_]->time_base,
-#if BUILDFLAG(USE_SYSTEM_FFMPEG)
- frame->pkt_duration + std::min(static_cast<int64_t>(0), frame->pts));
-#else
frame->duration + std::min(static_cast<int64_t>(0), frame->pts));
-#endif // BUILDFLAG(USE_SYSTEM_FFMPEG)
const base::TimeDelta frame_duration =
base::Seconds(frames_read / static_cast<double>(sample_rate_));

diff --git a/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/chromium/third_party/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
index c6446c2..805b95b 100644
--- a/third_party/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
+++ b/third_party/webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -233,7 +233,6 @@
int total_size = y_size + 2 * uv_size;

av_frame->format = context->pix_fmt;
- av_frame->reordered_opaque = context->reordered_opaque;

// Create a VideoFrame object, to keep a reference to the buffer.
// TODO(nisse): The VideoFrame's timestamp and rotation info is not used.
@@ -381,8 +380,6 @@
return WEBRTC_VIDEO_CODEC_ERROR;
}
packet->size = static_cast<int>(input_image.size());
- int64_t frame_timestamp_us = input_image.ntp_time_ms_ * 1000; // ms -> μs
- av_context_->reordered_opaque = frame_timestamp_us;

int result = avcodec_send_packet(av_context_.get(), packet.get());

@@ -399,10 +396,6 @@
return WEBRTC_VIDEO_CODEC_ERROR;
}

- // We don't expect reordering. Decoded frame timestamp should match
- // the input one.
- RTC_DCHECK_EQ(av_frame_->reordered_opaque, frame_timestamp_us);
-
// TODO(sakal): Maybe it is possible to get QP directly from FFmpeg.
h264_bitstream_parser_.ParseBitstream(input_image);
absl::optional<int> qp = h264_bitstream_parser_.GetLastSliceQp();