author     Jan Beich <jbeich@FreeBSD.org>  2020-06-16 12:56:44 +0000
committer  Jan Beich <jbeich@FreeBSD.org>  2020-06-16 12:56:44 +0000
commit     bf95f74ad400e5bfbd482146f938005d120ded05 (patch)
tree       c8a3ecb489c81ad5a16f205b34c12d52712c9a2a /multimedia/ffmpeg/files
parent     92495a7937f6adbf29bfd42dfe6f89b3fd22b116 (diff)
multimedia/ffmpeg: update to 4.3
New defaults (using existing dependencies):
- Enable DRM for VAAPI-capable capture on KMS console, Wayland, X11
- Enable LIBXML2 by default for DASH demuxing support [1]

Changes:  https://git.ffmpeg.org/gitweb/ffmpeg.git/blob/n4.3:/Changelog
ABI:      https://abi-laboratory.pro/tracker/timeline/ffmpeg/

Requested by: Tal Al <tad@vif.com> (via private mail) [1]
Reported by:  portscout
Notes: svn path=/head/; revision=539350
Diffstat (limited to 'multimedia/ffmpeg/files')
-rw-r--r--  multimedia/ffmpeg/files/patch-configure                            24
-rw-r--r--  multimedia/ffmpeg/files/patch-libavcodec_allcodecs.c               20
-rw-r--r--  multimedia/ffmpeg/files/patch-libavcodec_libsvt__vp9.c             14
-rw-r--r--  multimedia/ffmpeg/files/patch-libswscale_ppc_swscale__altivec.c    55
-rw-r--r--  multimedia/ffmpeg/files/patch-rav1e                               723
-rw-r--r--  multimedia/ffmpeg/files/patch-vdpau_vp9                           330
6 files changed, 16 insertions(+), 1150 deletions(-)
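As a quick sanity check of the new defaults described in the commit message, a minimal sketch (assuming ffmpeg 4.3 built from this port with its default options; the commands below are illustrative and not part of the commit):

    # confirm DRM and libxml2 support were compiled in
    $ ffmpeg -hide_banner -buildconf | grep -E 'libdrm|libxml2'
    # the LIBXML2 default enables the DASH demuxer
    $ ffmpeg -hide_banner -demuxers | grep dash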
diff --git a/multimedia/ffmpeg/files/patch-configure b/multimedia/ffmpeg/files/patch-configure
index 8cb95306b522..c801515bbac4 100644
--- a/multimedia/ffmpeg/files/patch-configure
+++ b/multimedia/ffmpeg/files/patch-configure
@@ -1,6 +1,6 @@
---- configure.orig 2018-04-20 10:02:55 UTC
+--- configure.orig 2020-06-15 18:54:23 UTC
+++ configure
-@@ -3516,13 +3516,7 @@ target_os_default=$(tolower $(uname -s))
+@@ -3744,13 +3744,7 @@ target_os_default=$(tolower $(uname -s))
host_os=$target_os_default
# machine
@@ -15,23 +15,3 @@
cpu="generic"
intrinsics="none"
-@@ -6122,6 +6117,7 @@
- die "ERROR: OpenMAX IL headers not found"; } && enable omx
- enabled openssl && { check_pkg_config openssl openssl openssl/ssl.h OPENSSL_init_ssl ||
- check_pkg_config openssl openssl openssl/ssl.h SSL_library_init ||
-+ check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto ||
- check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto ||
- check_lib openssl openssl/ssl.h SSL_library_init -lssl32 -leay32 ||
- check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto -lws2_32 -lgdi32 ||
-@@ -6234,8 +6229,10 @@ else
- EOF
- fi
-
--enabled alsa && check_pkg_config alsa alsa "alsa/asoundlib.h" snd_pcm_htimestamp ||
-+if enabled alsa; then
-+ check_pkg_config alsa alsa "alsa/asoundlib.h" snd_pcm_htimestamp ||
- check_lib alsa alsa/asoundlib.h snd_pcm_htimestamp -lasound
-+fi
-
- enabled libjack &&
- require_pkg_config libjack jack jack/jack.h jack_port_get_latency_range
diff --git a/multimedia/ffmpeg/files/patch-libavcodec_allcodecs.c b/multimedia/ffmpeg/files/patch-libavcodec_allcodecs.c
deleted file mode 100644
index 0e27547c26a1..000000000000
--- a/multimedia/ffmpeg/files/patch-libavcodec_allcodecs.c
+++ /dev/null
@@ -1,20 +0,0 @@
-https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/dc0806dd2588
-
---- libavcodec/allcodecs.c.orig 2018-11-05 23:22:25 UTC
-+++ libavcodec/allcodecs.c
-@@ -679,7 +679,6 @@ extern AVCodec ff_pcm_mulaw_at_encoder;
- extern AVCodec ff_pcm_mulaw_at_decoder;
- extern AVCodec ff_qdmc_at_decoder;
- extern AVCodec ff_qdm2_at_decoder;
--extern AVCodec ff_libaom_av1_decoder;
- extern AVCodec ff_libaom_av1_encoder;
- extern AVCodec ff_libaribb24_decoder;
- extern AVCodec ff_libcelt_decoder;
-@@ -738,6 +737,7 @@ extern AVCodec ff_idf_decoder;
- /* external libraries, that shouldn't be used by default if one of the
- * above is available */
- extern AVCodec ff_h263_v4l2m2m_encoder;
-+extern AVCodec ff_libaom_av1_decoder;
- extern AVCodec ff_libopenh264_encoder;
- extern AVCodec ff_libopenh264_decoder;
- extern AVCodec ff_h264_amf_encoder;
diff --git a/multimedia/ffmpeg/files/patch-libavcodec_libsvt__vp9.c b/multimedia/ffmpeg/files/patch-libavcodec_libsvt__vp9.c
new file mode 100644
index 000000000000..cb79b38ee4de
--- /dev/null
+++ b/multimedia/ffmpeg/files/patch-libavcodec_libsvt__vp9.c
@@ -0,0 +1,14 @@
+https://github.com/OpenVisualCloud/SVT-VP9/issues/116
+
+--- libavcodec/libsvt_vp9.c.orig 2020-06-15 18:54:23 UTC
++++ libavcodec/libsvt_vp9.c
+@@ -188,9 +188,6 @@ static int config_enc_params(EbSvtVp9EncConfiguration
+ param->min_qp_allowed = avctx->qmin;
+ }
+
+- param->intra_refresh_type =
+- !!(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) + 1;
+-
+ if (ten_bits) {
+ param->encoder_bit_depth = 10;
+ }
diff --git a/multimedia/ffmpeg/files/patch-libswscale_ppc_swscale__altivec.c b/multimedia/ffmpeg/files/patch-libswscale_ppc_swscale__altivec.c
deleted file mode 100644
index e4c1c55c503b..000000000000
--- a/multimedia/ffmpeg/files/patch-libswscale_ppc_swscale__altivec.c
+++ /dev/null
@@ -1,55 +0,0 @@
---- libswscale/ppc/swscale_altivec.c.orig 2019-08-11 20:06:32 UTC
-+++ libswscale/ppc/swscale_altivec.c
-@@ -153,13 +153,13 @@ static void yuv2plane1_float_altivec(const int32_t *sr
- const int add = (1 << (shift - 1));
- const int clip = (1 << 16) - 1;
- const float fmult = 1.0f / 65535.0f;
-- const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
-- const vector uint32_t vshift = (vector uint32_t) vec_splat_u32(shift);
-- const vector uint32_t vlargest = (vector uint32_t) {clip, clip, clip, clip};
-- const vector float vmul = (vector float) {fmult, fmult, fmult, fmult};
-- const vector float vzero = (vector float) {0, 0, 0, 0};
-- vector uint32_t v;
-- vector float vd;
-+ const vec_u32 vadd = (vec_u32) {add, add, add, add};
-+ const vec_u32 vshift = (vec_u32) vec_splat_u32(shift);
-+ const vec_u32 vlargest = (vec_u32) {clip, clip, clip, clip};
-+ const vec_f vmul = (vec_f) {fmult, fmult, fmult, fmult};
-+ const vec_f vzero = (vec_f) {0, 0, 0, 0};
-+ vec_u32 v;
-+ vec_f vd;
- int i;
-
- yuv2plane1_float_u(src, dest, dst_u, 0);
-@@ -186,14 +186,14 @@ static void yuv2plane1_float_bswap_altivec(const int32
- const int add = (1 << (shift - 1));
- const int clip = (1 << 16) - 1;
- const float fmult = 1.0f / 65535.0f;
-- const vector uint32_t vadd = (vector uint32_t) {add, add, add, add};
-- const vector uint32_t vshift = (vector uint32_t) vec_splat_u32(shift);
-- const vector uint32_t vlargest = (vector uint32_t) {clip, clip, clip, clip};
-+ const vec_u32 vadd = (vec_u32) {add, add, add, add};
-+ const vec_u32 vshift = (vec_u32) vec_splat_u32(shift);
-+ const vec_u32 vlargest = (vec_u32) {clip, clip, clip, clip};
- const vector float vmul = (vector float) {fmult, fmult, fmult, fmult};
- const vector float vzero = (vector float) {0, 0, 0, 0};
-- const vector uint32_t vswapbig = (vector uint32_t) {16, 16, 16, 16};
-- const vector uint16_t vswapsmall = vec_splat_u16(8);
-- vector uint32_t v;
-+ const vec_u32 vswapbig = (vec_u32) {16, 16, 16, 16};
-+ const vec_u16 vswapsmall = vec_splat_u16(8);
-+ vec_u32 v;
- vector float vd;
- int i;
-
-@@ -208,8 +208,8 @@ static void yuv2plane1_float_bswap_altivec(const int32
- vd = vec_ctf(v, 0);
- vd = vec_madd(vd, vmul, vzero);
-
-- vd = (vector float) vec_rl((vector uint32_t) vd, vswapbig);
-- vd = (vector float) vec_rl((vector uint16_t) vd, vswapsmall);
-+ vd = (vector float) vec_rl((vec_u32) vd, vswapbig);
-+ vd = (vector float) vec_rl((vec_u16) vd, vswapsmall);
-
- vec_st(vd, 0, (float *) &dest[i]);
- }
diff --git a/multimedia/ffmpeg/files/patch-rav1e b/multimedia/ffmpeg/files/patch-rav1e
deleted file mode 100644
index 9a5d28399b9e..000000000000
--- a/multimedia/ffmpeg/files/patch-rav1e
+++ /dev/null
@@ -1,723 +0,0 @@
-https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/d8bf24459b69
-https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/3a84081cbd98
-https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/1354c39c78e5
-
---- configure.orig 2019-11-11 11:47:47 UTC
-+++ configure
-@@ -254,6 +254,7 @@ External library support:
- --enable-libopenmpt enable decoding tracked files via libopenmpt [no]
- --enable-libopus enable Opus de/encoding via libopus [no]
- --enable-libpulse enable Pulseaudio input via libpulse [no]
-+ --enable-librav1e enable AV1 encoding via rav1e [no]
- --enable-librsvg enable SVG rasterization via librsvg [no]
- --enable-librubberband enable rubberband needed for rubberband filter [no]
- --enable-librtmp enable RTMP[E] support via librtmp [no]
-@@ -1785,6 +1786,7 @@ EXTERNAL_LIBRARY_LIST="
- libopenmpt
- libopus
- libpulse
-+ librav1e
- librsvg
- librtmp
- libshine
-@@ -3187,6 +3189,8 @@ libopenmpt_demuxer_deps="libopenmpt"
- libopus_decoder_deps="libopus"
- libopus_encoder_deps="libopus"
- libopus_encoder_select="audio_frame_queue"
-+librav1e_encoder_deps="librav1e"
-+librav1e_encoder_select="extract_extradata_bsf"
- librsvg_decoder_deps="librsvg"
- libshine_encoder_deps="libshine"
- libshine_encoder_select="audio_frame_queue"
-@@ -6255,6 +6259,7 @@ enabled libopus && {
- }
- }
- enabled libpulse && require_pkg_config libpulse libpulse pulse/pulseaudio.h pa_context_new
-+enabled librav1e && require_pkg_config librav1e "rav1e >= 0.1.0" rav1e.h rav1e_context_new
- enabled librsvg && require_pkg_config librsvg librsvg-2.0 librsvg-2.0/librsvg/rsvg.h rsvg_handle_render_cairo
- enabled librtmp && require_pkg_config librtmp librtmp librtmp/rtmp.h RTMP_Socket
- enabled librubberband && require_pkg_config librubberband "rubberband >= 1.8.1" rubberband/rubberband-c.h rubberband_new -lstdc++ && append librubberband_extralibs "-lstdc++"
---- doc/encoders.texi.orig 2019-08-05 20:52:21 UTC
-+++ doc/encoders.texi
-@@ -1378,6 +1378,49 @@ makes it possible to store non-rgb pix_fmts.
-
- @end table
-
-+@section librav1e
-+
-+rav1e AV1 encoder wrapper.
-+
-+Requires the presence of the rav1e headers and library during configuration.
-+You need to explicitly configure the build with @code{--enable-librav1e}.
-+
-+@subsection Options
-+
-+@table @option
-+@item qmax
-+Sets the maximum quantizer to use when using bitrate mode.
-+
-+@item qmin
-+Sets the minimum quantizer to use when using bitrate mode.
-+
-+@item qp
-+Uses quantizer mode to encode at the given quantizer.
-+
-+@item speed
-+Selects the speed preset (0-10) to encode with.
-+
-+@item tiles
-+Selects how many tiles to encode with.
-+
-+@item tile-rows
-+Selects how many rows of tiles to encode with.
-+
-+@item tile-columns
-+Selects how many columns of tiles to encode with.
-+
-+@item rav1e-params
-+Set rav1e options using a list of @var{key}=@var{value} pairs separated
-+by ":". See @command{rav1e --help} for a list of options.
-+
-+For example to specify librav1e encoding options with @option{-rav1e-params}:
-+
-+@example
-+ffmpeg -i input -c:v librav1e -b:v 500K -rav1e-params speed=5:low_latency=true output.mp4
-+@end example
-+
-+@end table
-+
- @section libaom-av1
-
- libaom AV1 encoder wrapper.
---- doc/general.texi.orig 2019-08-05 20:52:21 UTC
-+++ doc/general.texi
-@@ -243,6 +243,13 @@ FFmpeg can use the OpenJPEG libraries for decoding/enc
- instructions. To enable using OpenJPEG in FFmpeg, pass @code{--enable-libopenjpeg} to
- @file{./configure}.
-
-+@section rav1e
-+
-+FFmpeg can make use of rav1e (Rust AV1 Encoder) via its C bindings to encode videos.
-+Go to @url{https://github.com/xiph/rav1e/} and follow the instructions to build
-+the C library. To enable using rav1e in FFmpeg, pass @code{--enable-librav1e}
-+to @file{./configure}.
-+
- @section TwoLAME
-
- FFmpeg can make use of the TwoLAME library for MP2 encoding.
---- libavcodec/Makefile.orig 2019-11-11 11:47:47 UTC
-+++ libavcodec/Makefile
-@@ -988,6 +988,7 @@ OBJS-$(CONFIG_LIBOPUS_DECODER) += libopusde
- vorbis_data.o
- OBJS-$(CONFIG_LIBOPUS_ENCODER) += libopusenc.o libopus.o \
- vorbis_data.o
-+OBJS-$(CONFIG_LIBRAV1E_ENCODER) += librav1e.o
- OBJS-$(CONFIG_LIBSHINE_ENCODER) += libshine.o
- OBJS-$(CONFIG_LIBSPEEX_DECODER) += libspeexdec.o
- OBJS-$(CONFIG_LIBSPEEX_ENCODER) += libspeexenc.o
---- libavcodec/allcodecs.c.orig 2019-11-11 11:47:47 UTC
-+++ libavcodec/allcodecs.c
-@@ -703,6 +703,7 @@ extern AVCodec ff_libopenjpeg_encoder;
- extern AVCodec ff_libopenjpeg_decoder;
- extern AVCodec ff_libopus_encoder;
- extern AVCodec ff_libopus_decoder;
-+extern AVCodec ff_librav1e_encoder;
- extern AVCodec ff_librsvg_decoder;
- extern AVCodec ff_libshine_encoder;
- extern AVCodec ff_libspeex_encoder;
---- libavcodec/librav1e.c.orig 2019-11-11 11:47:47 UTC
-+++ libavcodec/librav1e.c
-@@ -0,0 +1,593 @@
-+/*
-+ * librav1e encoder
-+ *
-+ * Copyright (c) 2019 Derek Buitenhuis
-+ *
-+ * This file is part of FFmpeg.
-+ *
-+ * FFmpeg is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2.1 of the License, or (at your option) any later version.
-+ *
-+ * FFmpeg is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with FFmpeg; if not, write to the Free Software
-+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <rav1e.h>
-+
-+#include "libavutil/internal.h"
-+#include "libavutil/avassert.h"
-+#include "libavutil/base64.h"
-+#include "libavutil/common.h"
-+#include "libavutil/mathematics.h"
-+#include "libavutil/opt.h"
-+#include "libavutil/pixdesc.h"
-+#include "avcodec.h"
-+#include "internal.h"
-+
-+typedef struct librav1eContext {
-+ const AVClass *class;
-+
-+ RaContext *ctx;
-+ AVBSFContext *bsf;
-+
-+ uint8_t *pass_data;
-+ size_t pass_pos;
-+ int pass_size;
-+
-+ char *rav1e_opts;
-+ int quantizer;
-+ int speed;
-+ int tiles;
-+ int tile_rows;
-+ int tile_cols;
-+} librav1eContext;
-+
-+static inline RaPixelRange range_map(enum AVPixelFormat pix_fmt, enum AVColorRange range)
-+{
-+ switch (pix_fmt) {
-+ case AV_PIX_FMT_YUVJ420P:
-+ case AV_PIX_FMT_YUVJ422P:
-+ case AV_PIX_FMT_YUVJ444P:
-+ return RA_PIXEL_RANGE_FULL;
-+ }
-+
-+ switch (range) {
-+ case AVCOL_RANGE_JPEG:
-+ return RA_PIXEL_RANGE_FULL;
-+ case AVCOL_RANGE_MPEG:
-+ default:
-+ return RA_PIXEL_RANGE_LIMITED;
-+ }
-+}
-+
-+static inline RaChromaSampling pix_fmt_map(enum AVPixelFormat pix_fmt)
-+{
-+ switch (pix_fmt) {
-+ case AV_PIX_FMT_YUV420P:
-+ case AV_PIX_FMT_YUVJ420P:
-+ case AV_PIX_FMT_YUV420P10:
-+ case AV_PIX_FMT_YUV420P12:
-+ return RA_CHROMA_SAMPLING_CS420;
-+ case AV_PIX_FMT_YUV422P:
-+ case AV_PIX_FMT_YUVJ422P:
-+ case AV_PIX_FMT_YUV422P10:
-+ case AV_PIX_FMT_YUV422P12:
-+ return RA_CHROMA_SAMPLING_CS422;
-+ case AV_PIX_FMT_YUV444P:
-+ case AV_PIX_FMT_YUVJ444P:
-+ case AV_PIX_FMT_YUV444P10:
-+ case AV_PIX_FMT_YUV444P12:
-+ return RA_CHROMA_SAMPLING_CS444;
-+ default:
-+ av_assert0(0);
-+ }
-+}
-+
-+static inline RaChromaSamplePosition chroma_loc_map(enum AVChromaLocation chroma_loc)
-+{
-+ switch (chroma_loc) {
-+ case AVCHROMA_LOC_LEFT:
-+ return RA_CHROMA_SAMPLE_POSITION_VERTICAL;
-+ case AVCHROMA_LOC_TOPLEFT:
-+ return RA_CHROMA_SAMPLE_POSITION_COLOCATED;
-+ default:
-+ return RA_CHROMA_SAMPLE_POSITION_UNKNOWN;
-+ }
-+}
-+
-+static int get_stats(AVCodecContext *avctx, int eos)
-+{
-+ librav1eContext *ctx = avctx->priv_data;
-+ RaData* buf = rav1e_twopass_out(ctx->ctx);
-+ if (!buf)
-+ return 0;
-+
-+ if (!eos) {
-+ uint8_t *tmp = av_fast_realloc(ctx->pass_data, &ctx->pass_size,
-+ ctx->pass_pos + buf->len);
-+ if (!tmp) {
-+ rav1e_data_unref(buf);
-+ return AVERROR(ENOMEM);
-+ }
-+
-+ ctx->pass_data = tmp;
-+ memcpy(ctx->pass_data + ctx->pass_pos, buf->data, buf->len);
-+ ctx->pass_pos += buf->len;
-+ } else {
-+ size_t b64_size = AV_BASE64_SIZE(ctx->pass_pos);
-+
-+ memcpy(ctx->pass_data, buf->data, buf->len);
-+
-+ avctx->stats_out = av_malloc(b64_size);
-+ if (!avctx->stats_out) {
-+ rav1e_data_unref(buf);
-+ return AVERROR(ENOMEM);
-+ }
-+
-+ av_base64_encode(avctx->stats_out, b64_size, ctx->pass_data, ctx->pass_pos);
-+
-+ av_freep(&ctx->pass_data);
-+ }
-+
-+ rav1e_data_unref(buf);
-+
-+ return 0;
-+}
-+
-+static int set_stats(AVCodecContext *avctx)
-+{
-+ librav1eContext *ctx = avctx->priv_data;
-+ int ret = 1;
-+
-+ while (ret > 0 && ctx->pass_size - ctx->pass_pos > 0) {
-+ ret = rav1e_twopass_in(ctx->ctx, ctx->pass_data + ctx->pass_pos, ctx->pass_size);
-+ if (ret < 0)
-+ return AVERROR_EXTERNAL;
-+ ctx->pass_pos += ret;
-+ }
-+
-+ return 0;
-+}
-+
-+static av_cold int librav1e_encode_close(AVCodecContext *avctx)
-+{
-+ librav1eContext *ctx = avctx->priv_data;
-+
-+ if (ctx->ctx) {
-+ rav1e_context_unref(ctx->ctx);
-+ ctx->ctx = NULL;
-+ }
-+
-+ av_bsf_free(&ctx->bsf);
-+ av_freep(&ctx->pass_data);
-+
-+ return 0;
-+}
-+
-+static av_cold int librav1e_encode_init(AVCodecContext *avctx)
-+{
-+ librav1eContext *ctx = avctx->priv_data;
-+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
-+ RaConfig *cfg = NULL;
-+ int rret;
-+ int ret = 0;
-+
-+ cfg = rav1e_config_default();
-+ if (!cfg) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not allocate rav1e config.\n");
-+ return AVERROR_EXTERNAL;
-+ }
-+
-+ rav1e_config_set_time_base(cfg, (RaRational) {
-+ avctx->time_base.num * avctx->ticks_per_frame,
-+ avctx->time_base.den
-+ });
-+
-+ if (avctx->flags & AV_CODEC_FLAG_PASS2) {
-+ if (!avctx->stats_in) {
-+ av_log(avctx, AV_LOG_ERROR, "No stats file provided for second pass.\n");
-+ ret = AVERROR(EINVAL);
-+ goto end;
-+ }
-+
-+ ctx->pass_size = (strlen(avctx->stats_in) * 3) / 4;
-+ ctx->pass_data = av_malloc(ctx->pass_size);
-+ if (!ctx->pass_data) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not allocate stats buffer.\n");
-+ ret = AVERROR(ENOMEM);
-+ goto end;
-+ }
-+
-+ ctx->pass_size = av_base64_decode(ctx->pass_data, avctx->stats_in, ctx->pass_size);
-+ if (ctx->pass_size < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Invalid pass file.\n");
-+ ret = AVERROR(EINVAL);
-+ goto end;
-+ }
-+ }
-+
-+ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
-+ const AVBitStreamFilter *filter = av_bsf_get_by_name("extract_extradata");
-+ int bret;
-+
-+ if (!filter) {
-+ av_log(avctx, AV_LOG_ERROR, "extract_extradata bitstream filter "
-+ "not found. This is a bug, please report it.\n");
-+ ret = AVERROR_BUG;
-+ goto end;
-+ }
-+
-+ bret = av_bsf_alloc(filter, &ctx->bsf);
-+ if (bret < 0) {
-+ ret = bret;
-+ goto end;
-+ }
-+
-+ bret = avcodec_parameters_from_context(ctx->bsf->par_in, avctx);
-+ if (bret < 0) {
-+ ret = bret;
-+ goto end;
-+ }
-+
-+ bret = av_bsf_init(ctx->bsf);
-+ if (bret < 0) {
-+ ret = bret;
-+ goto end;
-+ }
-+ }
-+
-+ if (ctx->rav1e_opts) {
-+ AVDictionary *dict = NULL;
-+ AVDictionaryEntry *en = NULL;
-+
-+ if (!av_dict_parse_string(&dict, ctx->rav1e_opts, "=", ":", 0)) {
-+ while (en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX)) {
-+ int parse_ret = rav1e_config_parse(cfg, en->key, en->value);
-+ if (parse_ret < 0)
-+ av_log(avctx, AV_LOG_WARNING, "Invalid value for %s: %s.\n", en->key, en->value);
-+ }
-+ av_dict_free(&dict);
-+ }
-+ }
-+
-+ rret = rav1e_config_parse_int(cfg, "width", avctx->width);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Invalid width passed to rav1e.\n");
-+ ret = AVERROR_INVALIDDATA;
-+ goto end;
-+ }
-+
-+ rret = rav1e_config_parse_int(cfg, "height", avctx->height);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Invalid height passed to rav1e.\n");
-+ ret = AVERROR_INVALIDDATA;
-+ goto end;
-+ }
-+
-+ rret = rav1e_config_parse_int(cfg, "threads", avctx->thread_count);
-+ if (rret < 0)
-+ av_log(avctx, AV_LOG_WARNING, "Invalid number of threads, defaulting to auto.\n");
-+
-+ if (ctx->speed >= 0) {
-+ rret = rav1e_config_parse_int(cfg, "speed", ctx->speed);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set speed preset.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+ }
-+
-+ /* rav1e handles precedence between 'tiles' and cols/rows for us. */
-+ if (ctx->tiles > 0) {
-+ rret = rav1e_config_parse_int(cfg, "tiles", ctx->tiles);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set number of tiles to encode with.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+ }
-+ if (ctx->tile_rows > 0) {
-+ rret = rav1e_config_parse_int(cfg, "tile_rows", ctx->tile_rows);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set number of tile rows to encode with.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+ }
-+ if (ctx->tile_cols > 0) {
-+ rret = rav1e_config_parse_int(cfg, "tile_cols", ctx->tile_cols);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set number of tile cols to encode with.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+ }
-+
-+ if (avctx->gop_size > 0) {
-+ rret = rav1e_config_parse_int(cfg, "key_frame_interval", avctx->gop_size);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set max keyint.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+ }
-+
-+ if (avctx->keyint_min > 0) {
-+ rret = rav1e_config_parse_int(cfg, "min_key_frame_interval", avctx->keyint_min);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set min keyint.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+ }
-+
-+ if (avctx->bit_rate && ctx->quantizer < 0) {
-+ int max_quantizer = avctx->qmax >= 0 ? avctx->qmax : 255;
-+
-+ rret = rav1e_config_parse_int(cfg, "quantizer", max_quantizer);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set max quantizer.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+
-+ if (avctx->qmin >= 0) {
-+ rret = rav1e_config_parse_int(cfg, "min_quantizer", avctx->qmin);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set min quantizer.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+ }
-+
-+ rret = rav1e_config_parse_int(cfg, "bitrate", avctx->bit_rate);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set bitrate.\n");
-+ ret = AVERROR_INVALIDDATA;
-+ goto end;
-+ }
-+ } else if (ctx->quantizer >= 0) {
-+ if (avctx->bit_rate)
-+ av_log(avctx, AV_LOG_WARNING, "Both bitrate and quantizer specified. Using quantizer mode.");
-+
-+ rret = rav1e_config_parse_int(cfg, "quantizer", ctx->quantizer);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not set quantizer.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+ }
-+
-+ rret = rav1e_config_set_pixel_format(cfg, desc->comp[0].depth,
-+ pix_fmt_map(avctx->pix_fmt),
-+ chroma_loc_map(avctx->chroma_sample_location),
-+ range_map(avctx->pix_fmt, avctx->color_range));
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to set pixel format properties.\n");
-+ ret = AVERROR_INVALIDDATA;
-+ goto end;
-+ }
-+
-+ /* rav1e's colorspace enums match standard values. */
-+ rret = rav1e_config_set_color_description(cfg, (RaMatrixCoefficients) avctx->colorspace,
-+ (RaColorPrimaries) avctx->color_primaries,
-+ (RaTransferCharacteristics) avctx->color_trc);
-+ if (rret < 0) {
-+ av_log(avctx, AV_LOG_WARNING, "Failed to set color properties.\n");
-+ if (avctx->err_recognition & AV_EF_EXPLODE) {
-+ ret = AVERROR_INVALIDDATA;
-+ goto end;
-+ }
-+ }
-+
-+ ctx->ctx = rav1e_context_new(cfg);
-+ if (!ctx->ctx) {
-+ av_log(avctx, AV_LOG_ERROR, "Failed to create rav1e encode context.\n");
-+ ret = AVERROR_EXTERNAL;
-+ goto end;
-+ }
-+
-+ ret = 0;
-+
-+end:
-+
-+ rav1e_config_unref(cfg);
-+
-+ return ret;
-+}
-+
-+static int librav1e_send_frame(AVCodecContext *avctx, const AVFrame *frame)
-+{
-+ librav1eContext *ctx = avctx->priv_data;
-+ RaFrame *rframe = NULL;
-+ int ret;
-+
-+ if (frame) {
-+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
-+
-+ rframe = rav1e_frame_new(ctx->ctx);
-+ if (!rframe) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not allocate new rav1e frame.\n");
-+ return AVERROR(ENOMEM);
-+ }
-+
-+ for (int i = 0; i < desc->nb_components; i++) {
-+ int shift = i ? desc->log2_chroma_h : 0;
-+ int bytes = desc->comp[0].depth == 8 ? 1 : 2;
-+ rav1e_frame_fill_plane(rframe, i, frame->data[i],
-+ (frame->height >> shift) * frame->linesize[i],
-+ frame->linesize[i], bytes);
-+ }
-+ }
-+
-+ ret = rav1e_send_frame(ctx->ctx, rframe);
-+ if (rframe)
-+ rav1e_frame_unref(rframe); /* No need to unref if flushing. */
-+
-+ switch (ret) {
-+ case RA_ENCODER_STATUS_SUCCESS:
-+ break;
-+ case RA_ENCODER_STATUS_ENOUGH_DATA:
-+ return AVERROR(EAGAIN);
-+ case RA_ENCODER_STATUS_FAILURE:
-+ av_log(avctx, AV_LOG_ERROR, "Could not send frame: %s\n", rav1e_status_to_str(ret));
-+ return AVERROR_EXTERNAL;
-+ default:
-+ av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_send_frame: %s\n", ret, rav1e_status_to_str(ret));
-+ return AVERROR_UNKNOWN;
-+ }
-+
-+ return 0;
-+}
-+
-+static int librav1e_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
-+{
-+ librav1eContext *ctx = avctx->priv_data;
-+ RaPacket *rpkt = NULL;
-+ int ret;
-+
-+retry:
-+
-+ if (avctx->flags & AV_CODEC_FLAG_PASS1) {
-+ int sret = get_stats(avctx, 0);
-+ if (sret < 0)
-+ return sret;
-+ } else if (avctx->flags & AV_CODEC_FLAG_PASS2) {
-+ int sret = set_stats(avctx);
-+ if (sret < 0)
-+ return sret;
-+ }
-+
-+ ret = rav1e_receive_packet(ctx->ctx, &rpkt);
-+ switch (ret) {
-+ case RA_ENCODER_STATUS_SUCCESS:
-+ break;
-+ case RA_ENCODER_STATUS_LIMIT_REACHED:
-+ if (avctx->flags & AV_CODEC_FLAG_PASS1) {
-+ int sret = get_stats(avctx, 1);
-+ if (sret < 0)
-+ return sret;
-+ }
-+ return AVERROR_EOF;
-+ case RA_ENCODER_STATUS_ENCODED:
-+ if (avctx->internal->draining)
-+ goto retry;
-+ return AVERROR(EAGAIN);
-+ case RA_ENCODER_STATUS_NEED_MORE_DATA:
-+ if (avctx->internal->draining) {
-+ av_log(avctx, AV_LOG_ERROR, "Unexpected error when receiving packet after EOF.\n");
-+ return AVERROR_EXTERNAL;
-+ }
-+ return AVERROR(EAGAIN);
-+ case RA_ENCODER_STATUS_FAILURE:
-+ av_log(avctx, AV_LOG_ERROR, "Could not encode frame: %s\n", rav1e_status_to_str(ret));
-+ return AVERROR_EXTERNAL;
-+ default:
-+ av_log(avctx, AV_LOG_ERROR, "Unknown return code %d from rav1e_receive_packet: %s\n", ret, rav1e_status_to_str(ret));
-+ return AVERROR_UNKNOWN;
-+ }
-+
-+ ret = av_new_packet(pkt, rpkt->len);
-+ if (ret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "Could not allocate packet.\n");
-+ rav1e_packet_unref(rpkt);
-+ return ret;
-+ }
-+
-+ memcpy(pkt->data, rpkt->data, rpkt->len);
-+
-+ if (rpkt->frame_type == RA_FRAME_TYPE_KEY)
-+ pkt->flags |= AV_PKT_FLAG_KEY;
-+
-+ pkt->pts = pkt->dts = rpkt->input_frameno * avctx->ticks_per_frame;
-+ rav1e_packet_unref(rpkt);
-+
-+ if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
-+ int ret = av_bsf_send_packet(ctx->bsf, pkt);
-+ if (ret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "extradata extraction send failed.\n");
-+ av_packet_unref(pkt);
-+ return ret;
-+ }
-+
-+ ret = av_bsf_receive_packet(ctx->bsf, pkt);
-+ if (ret < 0) {
-+ av_log(avctx, AV_LOG_ERROR, "extradata extraction receive failed.\n");
-+ av_packet_unref(pkt);
-+ return ret;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+#define OFFSET(x) offsetof(librav1eContext, x)
-+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
-+
-+static const AVOption options[] = {
-+ { "qp", "use constant quantizer mode", OFFSET(quantizer), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 255, VE },
-+ { "speed", "what speed preset to use", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 10, VE },
-+ { "tiles", "number of tiles encode with", OFFSET(tiles), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
-+ { "tile-rows", "number of tiles rows to encode with", OFFSET(tile_rows), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
-+ { "tile-columns", "number of tiles columns to encode with", OFFSET(tile_cols), AV_OPT_TYPE_INT, { .i64 = 0 }, -1, INT64_MAX, VE },
-+ { "rav1e-params", "set the rav1e configuration using a :-separated list of key=value parameters", OFFSET(rav1e_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE },
-+ { NULL }
-+};
-+
-+static const AVCodecDefault librav1e_defaults[] = {
-+ { "b", "0" },
-+ { "g", "0" },
-+ { "keyint_min", "0" },
-+ { "qmax", "-1" },
-+ { "qmin", "-1" },
-+ { NULL }
-+};
-+
-+const enum AVPixelFormat librav1e_pix_fmts[] = {
-+ AV_PIX_FMT_YUV420P,
-+ AV_PIX_FMT_YUVJ420P,
-+ AV_PIX_FMT_YUV420P10,
-+ AV_PIX_FMT_YUV420P12,
-+ AV_PIX_FMT_YUV422P,
-+ AV_PIX_FMT_YUVJ422P,
-+ AV_PIX_FMT_YUV422P10,
-+ AV_PIX_FMT_YUV422P12,
-+ AV_PIX_FMT_YUV444P,
-+ AV_PIX_FMT_YUVJ444P,
-+ AV_PIX_FMT_YUV444P10,
-+ AV_PIX_FMT_YUV444P12,
-+ AV_PIX_FMT_NONE
-+};
-+
-+static const AVClass class = {
-+ .class_name = "librav1e",
-+ .item_name = av_default_item_name,
-+ .option = options,
-+ .version = LIBAVUTIL_VERSION_INT,
-+};
-+
-+AVCodec ff_librav1e_encoder = {
-+ .name = "librav1e",
-+ .long_name = NULL_IF_CONFIG_SMALL("librav1e AV1"),
-+ .type = AVMEDIA_TYPE_VIDEO,
-+ .id = AV_CODEC_ID_AV1,
-+ .init = librav1e_encode_init,
-+ .send_frame = librav1e_send_frame,
-+ .receive_packet = librav1e_receive_packet,
-+ .close = librav1e_encode_close,
-+ .priv_data_size = sizeof(librav1eContext),
-+ .priv_class = &class,
-+ .defaults = librav1e_defaults,
-+ .pix_fmts = librav1e_pix_fmts,
-+ .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS,
-+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
-+ .wrapper_name = "librav1e",
-+};
diff --git a/multimedia/ffmpeg/files/patch-vdpau_vp9 b/multimedia/ffmpeg/files/patch-vdpau_vp9
deleted file mode 100644
index cb473074451e..000000000000
--- a/multimedia/ffmpeg/files/patch-vdpau_vp9
+++ /dev/null
@@ -1,330 +0,0 @@
-https://git.ffmpeg.org/gitweb/ffmpeg.git/commitdiff/1054752c563c
-
---- configure.orig 2019-12-18 11:42:25 UTC
-+++ configure
-@@ -2976,6 +2976,8 @@ vp9_nvdec_hwaccel_deps="nvdec"
- vp9_nvdec_hwaccel_select="vp9_decoder"
- vp9_vaapi_hwaccel_deps="vaapi VADecPictureParameterBufferVP9_bit_depth"
- vp9_vaapi_hwaccel_select="vp9_decoder"
-+vp9_vdpau_hwaccel_deps="vdpau VdpPictureInfoVP9"
-+vp9_vdpau_hwaccel_select="vp9_decoder"
- wmv3_d3d11va_hwaccel_select="vc1_d3d11va_hwaccel"
- wmv3_d3d11va2_hwaccel_select="vc1_d3d11va2_hwaccel"
- wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel"
-@@ -6083,6 +6085,7 @@ check_type "windows.h d3d11.h" "ID3D11VideoContext"
- check_type "d3d9.h dxva2api.h" DXVA2_ConfigPictureDecode -D_WIN32_WINNT=0x0602
-
- check_type "vdpau/vdpau.h" "VdpPictureInfoHEVC"
-+check_type "vdpau/vdpau.h" "VdpPictureInfoVP9"
-
- if [ -z "$nvccflags" ]; then
- nvccflags=$nvccflags_default
---- libavcodec/Makefile.orig 2019-12-18 11:42:25 UTC
-+++ libavcodec/Makefile
-@@ -910,6 +910,7 @@ OBJS-$(CONFIG_VP9_D3D11VA_HWACCEL) += dxva2_vp9
- OBJS-$(CONFIG_VP9_DXVA2_HWACCEL) += dxva2_vp9.o
- OBJS-$(CONFIG_VP9_NVDEC_HWACCEL) += nvdec_vp9.o
- OBJS-$(CONFIG_VP9_VAAPI_HWACCEL) += vaapi_vp9.o
-+OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o
- OBJS-$(CONFIG_VP8_QSV_HWACCEL) += qsvdec_other.o
-
- # libavformat dependencies
---- libavcodec/hwaccels.h.orig 2019-07-08 17:45:25 UTC
-+++ libavcodec/hwaccels.h
-@@ -68,6 +68,7 @@ extern const AVHWAccel ff_vp9_d3d11va2_hwaccel;
- extern const AVHWAccel ff_vp9_dxva2_hwaccel;
- extern const AVHWAccel ff_vp9_nvdec_hwaccel;
- extern const AVHWAccel ff_vp9_vaapi_hwaccel;
-+extern const AVHWAccel ff_vp9_vdpau_hwaccel;
- extern const AVHWAccel ff_wmv3_d3d11va_hwaccel;
- extern const AVHWAccel ff_wmv3_d3d11va2_hwaccel;
- extern const AVHWAccel ff_wmv3_dxva2_hwaccel;
---- libavcodec/vdpau_internal.h.orig 2019-08-05 20:52:21 UTC
-+++ libavcodec/vdpau_internal.h
-@@ -54,6 +54,9 @@ union VDPAUPictureInfo {
- #ifdef VDP_YCBCR_FORMAT_Y_U_V_444
- VdpPictureInfoHEVC444 hevc_444;
- #endif
-+#ifdef VDP_DECODER_PROFILE_VP9_PROFILE_0
-+ VdpPictureInfoVP9 vp9;
-+#endif
- };
-
- typedef struct VDPAUHWContext {
---- libavcodec/vdpau_vp9.c.orig 2019-12-18 11:42:25 UTC
-+++ libavcodec/vdpau_vp9.c
-@@ -0,0 +1,242 @@
-+/*
-+ * VP9 HW decode acceleration through VDPAU
-+ *
-+ * Copyright (c) 2019 Manoj Gupta Bonda
-+ *
-+ * This file is part of FFmpeg.
-+ *
-+ * FFmpeg is free software; you can redistribute it and/or
-+ * modify it under the terms of the GNU Lesser General Public
-+ * License as published by the Free Software Foundation; either
-+ * version 2.1 of the License, or (at your option) any later version.
-+ *
-+ * FFmpeg is distributed in the hope that it will be useful,
-+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ * Lesser General Public License for more details.
-+ *
-+ * You should have received a copy of the GNU Lesser General Public
-+ * License along with FFmpeg; if not, write to the Free Software Foundation,
-+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
-+ */
-+
-+#include <vdpau/vdpau.h>
-+#include "libavutil/pixdesc.h"
-+#include "avcodec.h"
-+#include "internal.h"
-+#include "vp9data.h"
-+#include "vp9dec.h"
-+#include "hwaccel.h"
-+#include "vdpau.h"
-+#include "vdpau_internal.h"
-+
-+static int vdpau_vp9_start_frame(AVCodecContext *avctx,
-+ const uint8_t *buffer, uint32_t size)
-+{
-+ VP9Context *s = avctx->priv_data;
-+ VP9SharedContext *h = &(s->s);
-+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
-+ if (!pixdesc) {
-+ return AV_PIX_FMT_NONE;
-+ }
-+
-+ VP9Frame pic = h->frames[CUR_FRAME];
-+ struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
-+ int i;
-+
-+ VdpPictureInfoVP9 *info = &pic_ctx->info.vp9;
-+
-+ info->width = avctx->width;
-+ info->height = avctx->height;
-+ /* fill LvPictureInfoVP9 struct */
-+ info->lastReference = VDP_INVALID_HANDLE;
-+ info->goldenReference = VDP_INVALID_HANDLE;
-+ info->altReference = VDP_INVALID_HANDLE;
-+
-+ if (h->refs[h->h.refidx[0]].f && h->refs[h->h.refidx[0]].f->private_ref) {
-+ info->lastReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[0]].f);
-+ }
-+ if (h->refs[h->h.refidx[1]].f && h->refs[h->h.refidx[1]].f->private_ref) {
-+ info->goldenReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[1]].f);
-+ }
-+ if (h->refs[h->h.refidx[2]].f && h->refs[h->h.refidx[2]].f->private_ref) {
-+ info->altReference = ff_vdpau_get_surface_id(h->refs[h->h.refidx[2]].f);
-+ }
-+
-+ info->profile = h->h.profile;
-+ info->frameContextIdx = h->h.framectxid;
-+ info->keyFrame = h->h.keyframe;
-+ info->showFrame = !h->h.invisible;
-+ info->errorResilient = h->h.errorres;
-+ info->frameParallelDecoding = h->h.parallelmode;
-+
-+ info->subSamplingX = pixdesc->log2_chroma_w;
-+ info->subSamplingY = pixdesc->log2_chroma_h;
-+
-+ info->intraOnly = h->h.intraonly;
-+ info->allowHighPrecisionMv = h->h.keyframe ? 0 : h->h.highprecisionmvs;
-+ info->refreshEntropyProbs = h->h.refreshctx;
-+
-+ info->bitDepthMinus8Luma = pixdesc->comp[0].depth - 8;
-+ info->bitDepthMinus8Chroma = pixdesc->comp[1].depth - 8;
-+
-+ info->loopFilterLevel = h->h.filter.level;
-+ info->loopFilterSharpness = h->h.filter.sharpness;
-+ info->modeRefLfEnabled = h->h.lf_delta.enabled;
-+
-+ info->log2TileColumns = h->h.tiling.log2_tile_cols;
-+ info->log2TileRows = h->h.tiling.log2_tile_rows;
-+
-+ info->segmentEnabled = h->h.segmentation.enabled;
-+ info->segmentMapUpdate = h->h.segmentation.update_map;
-+ info->segmentMapTemporalUpdate = h->h.segmentation.temporal;
-+ info->segmentFeatureMode = h->h.segmentation.absolute_vals;
-+
-+ info->qpYAc = h->h.yac_qi;
-+ info->qpYDc = h->h.ydc_qdelta;
-+ info->qpChDc = h->h.uvdc_qdelta;
-+ info->qpChAc = h->h.uvac_qdelta;
-+
-+ info->resetFrameContext = h->h.resetctx;
-+ info->mcompFilterType = h->h.filtermode ^ (h->h.filtermode <= 1);
-+ info->uncompressedHeaderSize = h->h.uncompressed_header_size;
-+ info->compressedHeaderSize = h->h.compressed_header_size;
-+ info->refFrameSignBias[0] = 0;
-+
-+
-+ for (i = 0; i < FF_ARRAY_ELEMS(info->mbModeLfDelta); i++)
-+ info->mbModeLfDelta[i] = h->h.lf_delta.mode[i];
-+
-+ for (i = 0; i < FF_ARRAY_ELEMS(info->mbRefLfDelta); i++)
-+ info->mbRefLfDelta[i] = h->h.lf_delta.ref[i];
-+
-+ for (i = 0; i < FF_ARRAY_ELEMS(info->mbSegmentTreeProbs); i++)
-+ info->mbSegmentTreeProbs[i] = h->h.segmentation.prob[i];
-+
-+ for (i = 0; i < FF_ARRAY_ELEMS(info->activeRefIdx); i++) {
-+ info->activeRefIdx[i] = h->h.refidx[i];
-+ info->segmentPredProbs[i] = h->h.segmentation.pred_prob[i];
-+ info->refFrameSignBias[i + 1] = h->h.signbias[i];
-+ }
-+
-+ for (i = 0; i < FF_ARRAY_ELEMS(info->segmentFeatureEnable); i++) {
-+ info->segmentFeatureEnable[i][0] = h->h.segmentation.feat[i].q_enabled;
-+ info->segmentFeatureEnable[i][1] = h->h.segmentation.feat[i].lf_enabled;
-+ info->segmentFeatureEnable[i][2] = h->h.segmentation.feat[i].ref_enabled;
-+ info->segmentFeatureEnable[i][3] = h->h.segmentation.feat[i].skip_enabled;
-+
-+ info->segmentFeatureData[i][0] = h->h.segmentation.feat[i].q_val;
-+ info->segmentFeatureData[i][1] = h->h.segmentation.feat[i].lf_val;
-+ info->segmentFeatureData[i][2] = h->h.segmentation.feat[i].ref_val;
-+ info->segmentFeatureData[i][3] = 0;
-+ }
-+
-+ switch (avctx->colorspace) {
-+ default:
-+ case AVCOL_SPC_UNSPECIFIED:
-+ info->colorSpace = 0;
-+ break;
-+ case AVCOL_SPC_BT470BG:
-+ info->colorSpace = 1;
-+ break;
-+ case AVCOL_SPC_BT709:
-+ info->colorSpace = 2;
-+ break;
-+ case AVCOL_SPC_SMPTE170M:
-+ info->colorSpace = 3;
-+ break;
-+ case AVCOL_SPC_SMPTE240M:
-+ info->colorSpace = 4;
-+ break;
-+ case AVCOL_SPC_BT2020_NCL:
-+ info->colorSpace = 5;
-+ break;
-+ case AVCOL_SPC_RESERVED:
-+ info->colorSpace = 6;
-+ break;
-+ case AVCOL_SPC_RGB:
-+ info->colorSpace = 7;
-+ break;
-+ }
-+
-+ return ff_vdpau_common_start_frame(pic_ctx, buffer, size);
-+
-+}
-+
-+static const uint8_t start_code_prefix[3] = { 0x00, 0x00, 0x01 };
-+
-+static int vdpau_vp9_decode_slice(AVCodecContext *avctx,
-+ const uint8_t *buffer, uint32_t size)
-+{
-+ VP9SharedContext *h = avctx->priv_data;
-+ VP9Frame pic = h->frames[CUR_FRAME];
-+ struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
-+
-+ int val;
-+
-+ val = ff_vdpau_add_buffer(pic_ctx, start_code_prefix, 3);
-+ if (val)
-+ return val;
-+
-+ val = ff_vdpau_add_buffer(pic_ctx, buffer, size);
-+ if (val)
-+ return val;
-+
-+ return 0;
-+}
-+
-+static int vdpau_vp9_end_frame(AVCodecContext *avctx)
-+{
-+ VP9SharedContext *h = avctx->priv_data;
-+ VP9Frame pic = h->frames[CUR_FRAME];
-+ struct vdpau_picture_context *pic_ctx = pic.hwaccel_picture_private;
-+
-+ int val;
-+
-+ val = ff_vdpau_common_end_frame(avctx, pic.tf.f, pic_ctx);
-+ if (val < 0)
-+ return val;
-+
-+ return 0;
-+}
-+
-+static int vdpau_vp9_init(AVCodecContext *avctx)
-+{
-+ VdpDecoderProfile profile;
-+ uint32_t level = avctx->level;
-+
-+ switch (avctx->profile) {
-+ case FF_PROFILE_VP9_0:
-+ profile = VDP_DECODER_PROFILE_VP9_PROFILE_0;
-+ break;
-+ case FF_PROFILE_VP9_1:
-+ profile = VDP_DECODER_PROFILE_VP9_PROFILE_1;
-+ break;
-+ case FF_PROFILE_VP9_2:
-+ profile = VDP_DECODER_PROFILE_VP9_PROFILE_2;
-+ break;
-+ case FF_PROFILE_VP9_3:
-+ profile = VDP_DECODER_PROFILE_VP9_PROFILE_3;
-+ break;
-+ default:
-+ return AVERROR(ENOTSUP);
-+ }
-+
-+ return ff_vdpau_common_init(avctx, profile, level);
-+}
-+
-+const AVHWAccel ff_vp9_vdpau_hwaccel = {
-+ .name = "vp9_vdpau",
-+ .type = AVMEDIA_TYPE_VIDEO,
-+ .id = AV_CODEC_ID_VP9,
-+ .pix_fmt = AV_PIX_FMT_VDPAU,
-+ .start_frame = vdpau_vp9_start_frame,
-+ .end_frame = vdpau_vp9_end_frame,
-+ .decode_slice = vdpau_vp9_decode_slice,
-+ .frame_priv_data_size = sizeof(struct vdpau_picture_context),
-+ .init = vdpau_vp9_init,
-+ .uninit = ff_vdpau_common_uninit,
-+ .frame_params = ff_vdpau_common_frame_params,
-+ .priv_data_size = sizeof(VDPAUContext),
-+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
-+};
---- libavcodec/vp9.c.orig 2019-08-05 20:52:21 UTC
-+++ libavcodec/vp9.c
-@@ -173,7 +173,8 @@ static int update_size(AVCodecContext *avctx, int w, i
- #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
- CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
- CONFIG_VP9_NVDEC_HWACCEL + \
-- CONFIG_VP9_VAAPI_HWACCEL)
-+ CONFIG_VP9_VAAPI_HWACCEL + \
-+ CONFIG_VP9_VDPAU_HWACCEL)
- enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
- VP9Context *s = avctx->priv_data;
- uint8_t *p;
-@@ -188,6 +189,9 @@ static int update_size(AVCodecContext *avctx, int w, i
-
- switch (s->pix_fmt) {
- case AV_PIX_FMT_YUV420P:
-+#if CONFIG_VP9_VDPAU_HWACCEL
-+ *fmtp++ = AV_PIX_FMT_VDPAU;
-+#endif
- case AV_PIX_FMT_YUV420P10:
- #if CONFIG_VP9_DXVA2_HWACCEL
- *fmtp++ = AV_PIX_FMT_DXVA2_VLD;
-@@ -1816,6 +1820,9 @@ AVCodec ff_vp9_decoder = {
- #endif
- #if CONFIG_VP9_VAAPI_HWACCEL
- HWACCEL_VAAPI(vp9),
-+#endif
-+#if CONFIG_VP9_VDPAU_HWACCEL
-+ HWACCEL_VDPAU(vp9),
- #endif
- NULL
- },