diff --git a/Changelog b/Changelog index 1b0bc95b7a7b7..e38a607025975 100644 --- a/Changelog +++ b/Changelog @@ -2,6 +2,11 @@ Entries are sorted chronologically from oldest to youngest within each release, releases are sorted from youngest to oldest. version : +- tpad filter +- AV1 decoding support through libdav1d + + +version 4.1: - deblock filter - tmix filter - amplify filter @@ -35,6 +40,14 @@ version : - AV1 parser - SER demuxer - sinc audio filter source +- chromahold filter +- setparams filter +- vibrance filter +- decoding S12M timecode in h264 +- xstack filter +- pcm vidc decoder and encoder +- (a)graphmonitor filter +- yadif_cuda filter version 4.0: diff --git a/RELEASE b/RELEASE index ff2c9d1a30b72..e3dcbea35c34b 100644 --- a/RELEASE +++ b/RELEASE @@ -1 +1 @@ -4.0.git +4.1.git diff --git a/configure b/configure index 85d5dd59624d7..00b5d9795e0da 100755 --- a/configure +++ b/configure @@ -226,6 +226,7 @@ External library support: --enable-libcelt enable CELT decoding via libcelt [no] --enable-libcdio enable audio CD grabbing with libcdio [no] --enable-libcodec2 enable codec2 en/decoding using libcodec2 [no] + --enable-libdav1d enable AV1 decoding via libdav1d [no] --enable-libdavs2 enable AVS2 decoding via libdavs2 [no] --enable-libdc1394 enable IIDC-1394 grabbing using libdc1394 and libraw1394 [no] @@ -1712,6 +1713,7 @@ EXTERNAL_LIBRARY_LIST=" libcaca libcelt libcodec2 + libdav1d libdc1394 libdrm libflite @@ -2957,6 +2959,7 @@ h264_rkmpp_decoder_deps="rkmpp" h264_rkmpp_decoder_select="h264_mp4toannexb_bsf" h264_vaapi_encoder_select="cbs_h264 vaapi_encode" h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m" +h264_v4l2m2m_decoder_select="h264_mp4toannexb_bsf" h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m" hevc_amf_encoder_deps="amf" hevc_cuvid_decoder_deps="cuvid" @@ -2971,6 +2974,7 @@ hevc_rkmpp_decoder_select="hevc_mp4toannexb_bsf" hevc_vaapi_encoder_deps="VAEncPictureParameterBufferHEVC" hevc_vaapi_encoder_select="cbs_h265 vaapi_encode" 
hevc_v4l2m2m_decoder_deps="v4l2_m2m hevc_v4l2_m2m" +hevc_v4l2m2m_decoder_select="hevc_mp4toannexb_bsf" hevc_v4l2m2m_encoder_deps="v4l2_m2m hevc_v4l2_m2m" mjpeg_cuvid_decoder_deps="cuvid" mjpeg_qsv_encoder_deps="libmfx" @@ -3088,6 +3092,7 @@ libaom_av1_encoder_select="extract_extradata_bsf" libcelt_decoder_deps="libcelt" libcodec2_decoder_deps="libcodec2" libcodec2_encoder_deps="libcodec2" +libdav1d_decoder_deps="libdav1d" libdavs2_decoder_deps="libdavs2" libfdk_aac_decoder_deps="libfdk_aac" libfdk_aac_encoder_deps="libfdk_aac" @@ -3180,6 +3185,7 @@ image2_alias_pix_demuxer_select="image2_demuxer" image2_brender_pix_demuxer_select="image2_demuxer" ipod_muxer_select="mov_muxer" ismv_muxer_select="mov_muxer" +ivf_muxer_select="av1_metadata_bsf vp9_superframe_bsf" matroska_audio_muxer_select="matroska_muxer" matroska_demuxer_select="iso_media riffdec" matroska_demuxer_suggest="bzlib lzo zlib" @@ -3267,6 +3273,7 @@ libcdio_indev_deps="libcdio" libdc1394_indev_deps="libdc1394" openal_indev_deps="openal" opengl_outdev_deps="opengl" +opengl_outdev_suggest="sdl2" oss_indev_deps_any="sys_soundcard_h" oss_outdev_deps_any="sys_soundcard_h" pulse_indev_deps="libpulse" @@ -3480,6 +3487,7 @@ zscale_filter_deps="libzimg const_nan" scale_vaapi_filter_deps="vaapi" vpp_qsv_filter_deps="libmfx" vpp_qsv_filter_select="qsvvpp" +yadif_cuda_filter_deps="cuda_sdk" # examples avio_dir_cmd_deps="avformat avutil" @@ -6063,6 +6071,7 @@ enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 && die "ERROR: libcelt must be installed and version must be >= 0.11.0."; } enabled libcaca && require_pkg_config libcaca caca caca.h caca_create_canvas enabled libcodec2 && require libcodec2 codec2/codec2.h codec2_create -lcodec2 +enabled libdav1d && require_pkg_config libdav1d "dav1d >= 0.0.1" "dav1d/dav1d.h" dav1d_version enabled libdavs2 && require_pkg_config libdavs2 "davs2 >= 1.5.115" davs2.h davs2_decoder_open enabled libdc1394 && require_pkg_config libdc1394 libdc1394-2 dc1394/dc1394.h 
dc1394_new enabled libdrm && require_pkg_config libdrm libdrm xf86drm.h drmGetVersion @@ -6903,7 +6912,8 @@ enabled zoompan_filter && prepend avfilter_deps "swscale" enabled lavfi_indev && prepend avdevice_deps "avfilter" #FIXME -enabled sdl2_outdev && add_cflags $(filter_out '-Dmain=SDL_main' $sdl2_cflags) +enabled_any sdl2_outdev opengl_outdev && enabled sdl2 && + add_cflags $(filter_out '-Dmain=SDL_main' $sdl2_cflags) enabled opus_decoder && prepend avcodec_deps "swresample" diff --git a/doc/APIchanges b/doc/APIchanges index 9e93555dac8e6..db1879e6e2379 100644 --- a/doc/APIchanges +++ b/doc/APIchanges @@ -15,31 +15,39 @@ libavutil: 2017-10-21 API changes, most recent first: -2018-10-11 - xxxxxxxxxx - lavc 58.33.100 - mediacodec.h +-------- 8< --------- FFmpeg 4.1 was cut here -------- 8< --------- + +2018-10-27 - 718044dc19 - lavu 56.21.100 - pixdesc.h + Add av_read_image_line2(), av_write_image_line2() + +2018-10-24 - f9d4126f28 - lavu 56.20.100 - frame.h + Add AV_FRAME_DATA_S12M_TIMECODE + +2018-10-11 - f6d48b618a - lavc 58.33.100 - mediacodec.h Add av_mediacodec_render_buffer_at_time(). -2018-09-09 - xxxxxxxxxx - lavc 58.29.100 - avcodec.h +2018-09-09 - 35498c124a - lavc 58.29.100 - avcodec.h Add AV_PKT_DATA_AFD -2018-08-16 - xxxxxxxxxx - lavc 58.23.100 - avcodec.h +2018-08-16 - b33f5299a5 - lavc 58.23.100 - avcodec.h Add av_bsf_flush(). -2018-05-xx - xxxxxxxxxx - lavf 58.15.100 - avformat.h +2018-05-18 - 2b2f2f65f3 - lavf 58.15.100 - avformat.h Add pmt_version field to AVProgram -2018-05-xx - xxxxxxxxxx - lavf 58.14.100 - avformat.h +2018-05-17 - 5dfeb7f081 - lavf 58.14.100 - avformat.h Add AV_DISPOSITION_STILL_IMAGE -2018-05-xx - xxxxxxxxxx - lavu 56.18.101 - hwcontext_cuda.h +2018-05-10 - c855683427 - lavu 56.18.101 - hwcontext_cuda.h Add AVCUDADeviceContext.stream. -2018-04-xx - xxxxxxxxxx - lavu 56.18.100 - pixdesc.h +2018-04-30 - 56b081da57 - lavu 56.18.100 - pixdesc.h Add AV_PIX_FMT_FLAG_ALPHA to AV_PIX_FMT_PAL8. 
-2018-04-xx - xxxxxxxxxx - lavu 56.17.100 - opt.h +2018-04-26 - 5be0410cb3 - lavu 56.17.100 - opt.h Add AV_OPT_FLAG_DEPRECATED. -2018-04-xx - xxxxxxxxxx - lavu 56.16.100 - threadmessage.h +2018-04-26 - 71fa82bed6 - lavu 56.16.100 - threadmessage.h Add av_thread_message_queue_nb_elems(). -------- 8< --------- FFmpeg 4.0 was cut here -------- 8< --------- diff --git a/doc/encoders.texi b/doc/encoders.texi index 8d184f72f8710..c9464ca7b3b8f 100644 --- a/doc/encoders.texi +++ b/doc/encoders.texi @@ -1641,6 +1641,7 @@ means unlimited. @table @option @item auto-alt-ref Enable use of alternate reference frames (2-pass only). +Values greater than 1 enable multi-layer alternate reference frames (VP9 only). @item arnr-max-frames Set altref noise reduction max frame count. @item arnr-type @@ -1692,6 +1693,8 @@ Corpus VBR mode is a variant of standard VBR where the complexity distribution midpoint is passed in rather than calculated for a specific clip or chunk. The valid range is [0, 10000]. 0 (default) uses standard VBR. +@item enable-tpl @var{boolean} +Enable temporal dependency model. @end table @end table @@ -2598,6 +2601,8 @@ Size / quality tradeoff: higher values are smaller / worse quality. @option{b_qfactor} / @option{b_quant_factor} @item @option{b_qoffset} / @option{b_quant_offset} +@item +@option{slices} @end itemize All encoders support the following options: diff --git a/doc/fate.texi b/doc/fate.texi index a35299423065b..2be61d639c9bb 100644 --- a/doc/fate.texi +++ b/doc/fate.texi @@ -155,6 +155,8 @@ space on each client, network bandwidth and so on benefit from smaller test case Also keep in mind older checkouts use existing sample files, that means in practice generally do not replace, remove or overwrite files as it likely would break older checkouts or releases. +Also all needed samples for a commit should be uploaded, ideally 24 +hours before the push.
@example #First update your local samples copy: diff --git a/doc/filters.texi b/doc/filters.texi index 54b85c4bb9c9e..d16a8b8b1be92 100644 --- a/doc/filters.texi +++ b/doc/filters.texi @@ -1205,7 +1205,7 @@ Set max allowed Impulse Response filter duration in seconds. Default is 30 secon Allowed range is 0.1 to 60 seconds. @item response -Show IR frequency reponse, magnitude and phase in additional video stream. +Show IR frequency response, magnitude (magenta), phase (green) and group delay (yellow) in additional video stream. By default it is disabled. @item channel @@ -1214,6 +1214,19 @@ displayed. This option is used only when @var{response} is enabled. @item size Set video stream size. This option is used only when @var{response} is enabled. + +@item rate +Set video stream frame rate. This option is used only when @var{response} is enabled. + +@item minp +Set minimal partition size used for convolution. Default is @var{16}. +Allowed range is from @var{16} to @var{65536}. +Lower values decrease latency at the cost of higher CPU usage. + +@item maxp +Set maximal partition size used for convolution. Default is @var{65536}. +Allowed range is from @var{16} to @var{65536}. +Lower values decrease latency at the cost of higher CPU usage. @end table @subsection Examples @@ -6163,6 +6176,26 @@ Only deinterlace frames marked as interlaced. The default value is @code{all}. @end table +@section chromahold +Remove all color information for all colors except for a certain one. + +The filter accepts the following options: + +@table @option +@item color +The color which will not be replaced with neutral chroma. + +@item similarity +Similarity percentage with the above color. +0.01 matches only the exact key color, while 1.0 matches everything. + +@item yuv +Signals that the color passed is already in YUV instead of RGB. + +Literal colors like "green" or "red" don't make sense with this enabled anymore. +This can be used to pass exact YUV values as hexadecimal numbers.
+@end table + @section chromakey YUV colorspace color/chroma keying. @@ -10101,6 +10134,8 @@ Default is @code{-1}. @section geq +Apply generic equation to each pixel. + The filter accepts the following options: @table @option @@ -10278,6 +10313,63 @@ gradfun=radius=8 @end example @end itemize +@section graphmonitor, agraphmonitor +Show various filtergraph stats. + +With this filter one can debug a complete filtergraph, +especially issues with links filling with queued frames. + +The filter accepts the following options: + +@table @option +@item size, s +Set video output size. Default is @var{hd720}. + +@item opacity, o +Set video opacity. Default is @var{0.9}. Allowed range is from @var{0} to @var{1}. + +@item mode, m +Set output mode, can be @var{full} or @var{compact}. +In @var{compact} mode only filters with some queued frames display stats. + +@item flags, f +Set flags which control which stats are shown in video. + +Available values for flags are: +@table @samp +@item queue +Display number of queued frames in each link. + +@item frame_count_in +Display number of frames taken from filter. + +@item frame_count_out +Display number of frames given out from filter. + +@item pts +Display current filtered frame pts. + +@item time +Display current filtered frame time. + +@item timebase +Display time base for filter link. + +@item format +Display used format for filter link. + +@item size +Display video size or number of audio channels in case of audio used by filter link. + +@item rate +Display video frame rate or sample rate in case of audio used by filter link. +@end table + +@item rate, r +Set upper limit for video rate of output stream. Default value is @var{25}. +This guarantees that output video frame rate will not be higher than this value. +@end table + @section greyedge A color constancy variation filter which estimates scene illumination via grey edge algorithm and corrects the scene colors accordingly.
@@ -10528,6 +10620,7 @@ A floating point number which specifies chroma temporal strength. It defaults to @var{luma_tmp}*@var{chroma_spatial}/@var{luma_spatial}. @end table +@anchor{hwdownload} @section hwdownload Download hardware frames to system memory. @@ -10618,6 +10711,7 @@ ways if there are any additional constraints on that filter's output. Do not use it without fully understanding the implications of its use. @end table +@anchor{hwupload} @section hwupload Upload system memory frames to hardware surfaces. @@ -11300,6 +11394,28 @@ Set maximal size in number of frames. Default is 0. Set first frame of loop. Default is 0. @end table +@subsection Examples + +@itemize +@item +Loop single first frame infinitely: +@example +loop=loop=-1:size=1:start=0 +@end example + +@item +Loop single first frame 10 times: +@example +loop=loop=10:size=1:start=0 +@end example + +@item +Loop 10 first frames 5 times: +@example +loop=loop=5:size=10:start=0 +@end example +@end itemize + @section lut1d Apply a 1D LUT to an input video. @@ -14947,6 +15063,125 @@ Mark the frame as progressive. @end table @end table +@anchor{setparams} +@section setparams + +Force frame parameter for the output video frame. + +The @code{setparams} filter marks interlace and color range for the +output frames. It does not change the input frame, but only sets the +corresponding property, which affects how the frame is treated by +filters/encoders. + +@table @option +@item field_mode +Available values are: + +@table @samp +@item auto +Keep the same field property (default). + +@item bff +Mark the frame as bottom-field-first. + +@item tff +Mark the frame as top-field-first. + +@item prog +Mark the frame as progressive. +@end table + +@item range +Available values are: + +@table @samp +@item auto +Keep the same color range property (default). + +@item unspecified, unknown +Mark the frame as unspecified color range. + +@item limited, tv, mpeg +Mark the frame as limited range. 
+ +@item full, pc, jpeg +Mark the frame as full range. +@end table + +@item color_primaries +Set the color primaries. +Available values are: + +@table @samp +@item auto +Keep the same color primaries property (default). + +@item bt709 +@item unknown +@item bt470m +@item bt470bg +@item smpte170m +@item smpte240m +@item film +@item bt2020 +@item smpte428 +@item smpte431 +@item smpte432 +@item jedec-p22 +@end table + +@item color_trc +Set the color transfer. +Available values are: + +@table @samp +@item auto +Keep the same color trc property (default). + +@item bt709 +@item unknown +@item bt470m +@item bt470bg +@item smpte170m +@item smpte240m +@item linear +@item log100 +@item log316 +@item iec61966-2-4 +@item bt1361e +@item iec61966-2-1 +@item bt2020-10 +@item bt2020-12 +@item smpte2084 +@item smpte428 +@item arib-std-b67 +@end table + +@item colorspace +Set the colorspace. +Available values are: + +@table @samp +@item auto +Keep the same colorspace property (default). + +@item gbr +@item bt709 +@item unknown +@item fcc +@item bt470bg +@item smpte170m +@item smpte240m +@item ycgco +@item bt2020nc +@item bt2020c +@item smpte2085 +@item chroma-derived-nc +@item chroma-derived-c +@item ictcp +@end table +@end table + @section showinfo Show a line containing various information for each input video frame. @@ -16428,6 +16663,7 @@ tmix=frames=3:weights="-1 2 -1":scale=1 @end example @end itemize +@anchor{tonemap} @section tonemap Tone map colors from different dynamic ranges. @@ -16535,6 +16771,46 @@ embedded peak information in display metadata is not reliable or when tone mapping from a lower range to a higher range. @end table +@section tpad + +Temporarily pad video frames. + +The filter accepts the following options: + +@table @option +@item start +Specify number of delay frames before input video stream. + +@item stop +Specify number of padding frames after input video stream. +Set to -1 to pad indefinitely.
+ +@item start_mode +Set kind of frames added to beginning of stream. +Can be either @var{add} or @var{clone}. +With @var{add} frames of solid-color are added. +With @var{clone} frames are clones of first frame. + +@item stop_mode +Set kind of frames added to end of stream. +Can be either @var{add} or @var{clone}. +With @var{add} frames of solid-color are added. +With @var{clone} frames are clones of last frame. + +@item start_duration, stop_duration +Specify the duration of the start/stop delay. See +@ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils} +for the accepted syntax. +These options override @var{start} and @var{stop}. + +@item color +Specify the color of the padded area. For the syntax of this option, +check the @ref{color syntax,,"Color" section in the ffmpeg-utils +manual,ffmpeg-utils}. + +The default value of @var{color} is "black". +@end table + @anchor{transpose} @section transpose @@ -17254,6 +17530,35 @@ and ones with constant delta pts. If there was frames with variable delta, than it will also show min and max delta encountered. +@section vibrance + +Boost or alter saturation. + +The filter accepts the following options: +@table @option +@item intensity +Set strength of boost if positive value or strength of alter if negative value. +Default is 0. Allowed range is from -2 to 2. + +@item rbal +Set the red balance. Default is 1. Allowed range is from -10 to 10. + +@item gbal +Set the green balance. Default is 1. Allowed range is from -10 to 10. + +@item bbal +Set the blue balance. Default is 1. Allowed range is from -10 to 10. + +@item rlum +Set the red luma coefficient. + +@item glum +Set the green luma coefficient. + +@item blum +Set the blue luma coefficient. +@end table + @anchor{vignette} @section vignette @@ -17630,6 +17935,61 @@ Set the scaling dimension: @code{2} for @code{2xBR}, @code{3} for Default is @code{3}. @end table +@section xstack +Stack video inputs into custom layout. 
+ +All streams must be of same pixel format. + +The filter accepts the following options: + +@table @option +@item inputs +Set number of input streams. Default is 2. + +@item layout +Specify layout of inputs. +This option requires the desired layout configuration to be explicitly set by the user. +This sets position of each video input in output. Each input +is separated by '|'. +The first number represents the column, and the second number represents the row. +Numbers start at 0 and are separated by '_'. Optionally one can use wX and hX, +where X is video input from which to take width or height. +Multiple values can be used when separated by '+'. In such +case values are summed together. + +@item shortest +If set to 1, force the output to terminate when the shortest input +terminates. Default value is 0. +@end table + +@subsection Examples + +@itemize +@item +Display 4 inputs into 2x2 grid, +note that if inputs are of different sizes unused gaps might appear, +as not all of output video is used. +@example +xstack=inputs=4:layout=0_0|0_h0|w0_0|w0_h0 +@end example + +@item +Display 4 inputs into 1x4 grid, +note that if inputs are of different sizes unused gaps might appear, +as not all of output video is used. +@example +xstack=inputs=4:layout=0_0|0_h0|0_h0+h1|0_h0+h1+h2 +@end example + +@item +Display 9 inputs into 3x3 grid, +note that if inputs are of different sizes unused gaps might appear, +as not all of output video is used. +@example +xstack=inputs=9:layout=w3_0|w3_h0+h2|w3_h0|0_h4|0_0|w3+w1_0|0_h1+h2|w3+w1_h0|w3+w1_h1+h2 +@end example +@end itemize + @anchor{yadif} @section yadif @@ -17688,35 +18048,93 @@ Only deinterlace frames marked as interlaced. The default value is @code{all}. @end table -@section zoompan - -Apply Zoom & Pan effect.
+@section yadif_cuda -This filter accepts the following options: +Deinterlace the input video using the @ref{yadif} algorithm, but implemented +in CUDA so that it can work as part of a GPU accelerated pipeline with nvdec +and/or nvenc. -@table @option -@item zoom, z -Set the zoom expression. Default is 1. +It accepts the following parameters: -@item x -@item y -Set the x and y expression. Default is 0. -@item d -Set the duration expression in number of frames. -This sets for how many number of frames effect will last for -single input image. +@table @option -@item s -Set the output image size, default is 'hd720'. +@item mode +The interlacing mode to adopt. It accepts one of the following values: -@item fps -Set the output frame rate, default is '25'. +@table @option +@item 0, send_frame +Output one frame for each frame. +@item 1, send_field +Output one frame for each field. +@item 2, send_frame_nospatial +Like @code{send_frame}, but it skips the spatial interlacing check. +@item 3, send_field_nospatial +Like @code{send_field}, but it skips the spatial interlacing check. @end table -Each expression can contain the following constants: +The default value is @code{send_frame}. -@table @option +@item parity +The picture field parity assumed for the input interlaced video. It accepts one +of the following values: + +@table @option +@item 0, tff +Assume the top field is first. +@item 1, bff +Assume the bottom field is first. +@item -1, auto +Enable automatic detection of field parity. +@end table + +The default value is @code{auto}. +If the interlacing is unknown or the decoder does not export this information, +top field first will be assumed. + +@item deint +Specify which frames to deinterlace. Accept one of the following +values: + +@table @option +@item 0, all +Deinterlace all frames. +@item 1, interlaced +Only deinterlace frames marked as interlaced. +@end table + +The default value is @code{all}. +@end table + +@section zoompan + +Apply Zoom & Pan effect. 
+ +This filter accepts the following options: + +@table @option +@item zoom, z +Set the zoom expression. Default is 1. + +@item x +@item y +Set the x and y expression. Default is 0. + +@item d +Set the duration expression in number of frames. +This sets for how many number of frames effect will last for +single input image. + +@item s +Set the output image size, default is 'hd720'. + +@item fps +Set the output frame rate, default is '25'. +@end table + +Each expression can contain the following constants: + +@table @option @item in_w, iw Input width. @@ -18057,6 +18475,585 @@ pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1. @c man end VIDEO FILTERS +@chapter OpenCL Video Filters +@c man begin OPENCL VIDEO FILTERS + +Below is a description of the currently available OpenCL video filters. + +To enable compilation of these filters you need to configure FFmpeg with +@code{--enable-opencl}. + +Running OpenCL filters requires you to initialize a hardware device and to pass that device to all filters in any filter graph. +@table @option + +@item -init_hw_device opencl[=@var{name}][:@var{device}[,@var{key=value}...]] +Initialise a new hardware device of type @var{opencl} called @var{name}, using the +given device parameters. + +@item -filter_hw_device @var{name} +Pass the hardware device called @var{name} to all filters in any filter graph. + +@end table + +For more detailed information see @url{https://www.ffmpeg.org/ffmpeg.html#Advanced-Video-options} + +@itemize +@item +Example of choosing the first device on the second platform and running avgblur_opencl filter with default parameters on it. 
+@example +-init_hw_device opencl=gpu:1.0 -filter_hw_device gpu -i INPUT -vf "hwupload, avgblur_opencl, hwdownload" OUTPUT +@end example +@end itemize + +Since OpenCL filters are not able to access frame data in normal memory, all frame data needs to be uploaded(@ref{hwupload}) to hardware surfaces connected to the appropriate device before being used and then downloaded(@ref{hwdownload}) back to normal memory. Note that @ref{hwupload} will upload to a surface with the same layout as the software frame, so it may be necessary to add a @ref{format} filter immediately before to get the input into the right format and @ref{hwdownload} does not support all formats on the output - it may be necessary to insert an additional @ref{format} filter immediately following in the graph to get the output in a supported format. + +@section avgblur_opencl + +Apply average blur filter. + +The filter accepts the following options: + +@table @option +@item sizeX +Set horizontal radius size. +Range is @code{[1, 1024]} and default value is @code{1}. + +@item planes +Set which planes to filter. Default value is @code{0xf}, by which all planes are processed. + +@item sizeY +Set vertical radius size. Range is @code{[1, 1024]} and default value is @code{0}. If zero, @code{sizeX} value will be used. +@end table + +@subsection Example + +@itemize +@item +Apply average blur filter with horizontal and vertical size of 3, setting each pixel of the output to the average value of the 7x7 region centered on it in the input. For pixels on the edges of the image, the region does not extend beyond the image boundaries, and so out-of-range coordinates are not used in the calculations. +@example +-i INPUT -vf "hwupload, avgblur_opencl=3, hwdownload" OUTPUT +@end example +@end itemize + +@section boxblur_opencl + +Apply a boxblur algorithm to the input video. 
+ +It accepts the following parameters: + +@table @option + +@item luma_radius, lr +@item luma_power, lp +@item chroma_radius, cr +@item chroma_power, cp +@item alpha_radius, ar +@item alpha_power, ap + +@end table + +A description of the accepted options follows. + +@table @option +@item luma_radius, lr +@item chroma_radius, cr +@item alpha_radius, ar +Set an expression for the box radius in pixels used for blurring the +corresponding input plane. + +The radius value must be a non-negative number, and must not be +greater than the value of the expression @code{min(w,h)/2} for the +luma and alpha planes, and of @code{min(cw,ch)/2} for the chroma +planes. + +Default value for @option{luma_radius} is "2". If not specified, +@option{chroma_radius} and @option{alpha_radius} default to the +corresponding value set for @option{luma_radius}. + +The expressions can contain the following constants: +@table @option +@item w +@item h +The input width and height in pixels. + +@item cw +@item ch +The input chroma image width and height in pixels. + +@item hsub +@item vsub +The horizontal and vertical chroma subsample values. For example, for the +pixel format "yuv422p", @var{hsub} is 2 and @var{vsub} is 1. +@end table + +@item luma_power, lp +@item chroma_power, cp +@item alpha_power, ap +Specify how many times the boxblur filter is applied to the +corresponding plane. + +Default value for @option{luma_power} is 2. If not specified, +@option{chroma_power} and @option{alpha_power} default to the +corresponding value set for @option{luma_power}. + +A value of 0 will disable the effect. +@end table + +@subsection Examples + +Apply boxblur filter, setting each pixel of the output to the average value of box-radiuses @var{luma_radius}, @var{chroma_radius}, @var{alpha_radius} for each plane respectively. The filter will apply @var{luma_power}, @var{chroma_power}, @var{alpha_power} times onto the corresponding plane. 
For pixels on the edges of the image, the radius does not extend beyond the image boundaries, and so out-of-range coordinates are not used in the calculations. + +@itemize +@item +Apply a boxblur filter with the luma, chroma, and alpha radius +set to 2 and luma, chroma, and alpha power set to 3. The filter will run 3 times with box-radius set to 2 for every plane of the image. +@example +-i INPUT -vf "hwupload, boxblur_opencl=luma_radius=2:luma_power=3, hwdownload" OUTPUT +-i INPUT -vf "hwupload, boxblur_opencl=2:3, hwdownload" OUTPUT +@end example + +@item +Apply a boxblur filter with luma radius set to 2, luma_power to 1, chroma_radius to 4, chroma_power to 5, alpha_radius to 3 and alpha_power to 7. + +For the luma plane, a 2x2 box radius will be run once. + +For the chroma plane, a 4x4 box radius will be run 5 times. + +For the alpha plane, a 3x3 box radius will be run 7 times. +@example +-i INPUT -vf "hwupload, boxblur_opencl=2:1:4:5:3:7, hwdownload" OUTPUT +@end example +@end itemize + +@section convolution_opencl + +Apply convolution of 3x3, 5x5, 7x7 matrix. + +The filter accepts the following options: + +@table @option +@item 0m +@item 1m +@item 2m +@item 3m +Set matrix for each plane. +Matrix is sequence of 9, 25 or 49 signed numbers. +Default value for each plane is @code{0 0 0 0 1 0 0 0 0}. + +@item 0rdiv +@item 1rdiv +@item 2rdiv +@item 3rdiv +Set multiplier for calculated value for each plane. +If unset or 0, it will be sum of all matrix elements. +The option value must be an float number greater or equal to @code{0.0}. Default value is @code{1.0}. + +@item 0bias +@item 1bias +@item 2bias +@item 3bias +Set bias for each plane. This value is added to the result of the multiplication. +Useful for making the overall image brighter or darker. +The option value must be an float number greater or equal to @code{0.0}. Default value is @code{0.0}. 
+ +@end table + +@subsection Examples + +@itemize +@item +Apply sharpen: +@example +-i INPUT -vf "hwupload, convolution_opencl=0 -1 0 -1 5 -1 0 -1 0:0 -1 0 -1 5 -1 0 -1 0:0 -1 0 -1 5 -1 0 -1 0:0 -1 0 -1 5 -1 0 -1 0, hwdownload" OUTPUT +@end example + +@item +Apply blur: +@example +-i INPUT -vf "hwupload, convolution_opencl=1 1 1 1 1 1 1 1 1:1 1 1 1 1 1 1 1 1:1 1 1 1 1 1 1 1 1:1 1 1 1 1 1 1 1 1:1/9:1/9:1/9:1/9, hwdownload" OUTPUT +@end example + +@item +Apply edge enhance: +@example +-i INPUT -vf "hwupload, convolution_opencl=0 0 0 -1 1 0 0 0 0:0 0 0 -1 1 0 0 0 0:0 0 0 -1 1 0 0 0 0:0 0 0 -1 1 0 0 0 0:5:1:1:1:0:128:128:128, hwdownload" OUTPUT +@end example + +@item +Apply edge detect: +@example +-i INPUT -vf "hwupload, convolution_opencl=0 1 0 1 -4 1 0 1 0:0 1 0 1 -4 1 0 1 0:0 1 0 1 -4 1 0 1 0:0 1 0 1 -4 1 0 1 0:5:5:5:1:0:128:128:128, hwdownload" OUTPUT +@end example + +@item +Apply laplacian edge detector which includes diagonals: +@example +-i INPUT -vf "hwupload, convolution_opencl=1 1 1 1 -8 1 1 1 1:1 1 1 1 -8 1 1 1 1:1 1 1 1 -8 1 1 1 1:1 1 1 1 -8 1 1 1 1:5:5:5:1:0:128:128:0, hwdownload" OUTPUT +@end example + +@item +Apply emboss: +@example +-i INPUT -vf "hwupload, convolution_opencl=-2 -1 0 -1 1 1 0 1 2:-2 -1 0 -1 1 1 0 1 2:-2 -1 0 -1 1 1 0 1 2:-2 -1 0 -1 1 1 0 1 2, hwdownload" OUTPUT +@end example +@end itemize + +@section dilation_opencl + +Apply dilation effect to the video. + +This filter replaces the pixel by the local(3x3) maximum. + +It accepts the following options: + +@table @option +@item threshold0 +@item threshold1 +@item threshold2 +@item threshold3 +Limit the maximum change for each plane. Range is @code{[0, 65535]} and default value is @code{65535}. +If @code{0}, plane will remain unchanged. + +@item coordinates +Flag which specifies the pixel to refer to. +Range is @code{[0, 255]} and default value is @code{255}, i.e. all eight pixels are used. 
+ +Flags to local 3x3 coordinates region centered on @code{x}: + + 1 2 3 + + 4 x 5 + + 6 7 8 +@end table + +@subsection Example + +@itemize +@item +Apply dilation filter with threshold0 set to 30, threshold1 set 40, threshold2 set to 50 and coordinates set to 231, setting each pixel of the output to the local maximum between pixels: 1, 2, 3, 6, 7, 8 of the 3x3 region centered on it in the input. If the difference between input pixel and local maximum is more then threshold of the corresponding plane, output pixel will be set to input pixel + threshold of corresponding plane. +@example +-i INPUT -vf "hwupload, dilation_opencl=30:40:50:coordinates=231, hwdownload" OUTPUT +@end example +@end itemize + +@section erosion_opencl + +Apply erosion effect to the video. + +This filter replaces the pixel by the local(3x3) minimum. + +It accepts the following options: + +@table @option +@item threshold0 +@item threshold1 +@item threshold2 +@item threshold3 +Limit the maximum change for each plane. Range is @code{[0, 65535]} and default value is @code{65535}. +If @code{0}, plane will remain unchanged. + +@item coordinates +Flag which specifies the pixel to refer to. +Range is @code{[0, 255]} and default value is @code{255}, i.e. all eight pixels are used. + +Flags to local 3x3 coordinates region centered on @code{x}: + + 1 2 3 + + 4 x 5 + + 6 7 8 +@end table + +@subsection Example + +@itemize +@item +Apply erosion filter with threshold0 set to 30, threshold1 set 40, threshold2 set to 50 and coordinates set to 231, setting each pixel of the output to the local minimum between pixels: 1, 2, 3, 6, 7, 8 of the 3x3 region centered on it in the input. If the difference between input pixel and local minimum is more then threshold of the corresponding plane, output pixel will be set to input pixel - threshold of corresponding plane. 
+@example +-i INPUT -vf "hwupload, erosion_opencl=30:40:50:coordinates=231, hwdownload" OUTPUT +@end example +@end itemize + +@section overlay_opencl + +Overlay one video on top of another. + +It takes two inputs and has one output. The first input is the "main" video on which the second input is overlaid. +This filter requires same memory layout for all the inputs. So, format conversion may be needed. + +The filter accepts the following options: + +@table @option + +@item x +Set the x coordinate of the overlaid video on the main video. +Default value is @code{0}. + +@item y +Set the y coordinate of the overlaid video on the main video. +Default value is @code{0}. + +@end table + +@subsection Examples + +@itemize +@item +Overlay an image LOGO at the top-left corner of the INPUT video. Both inputs are yuv420p format. +@example +-i INPUT -i LOGO -filter_complex "[0:v]hwupload[a], [1:v]format=yuv420p, hwupload[b], [a][b]overlay_opencl, hwdownload" OUTPUT +@end example +@item +The inputs have same memory layout for color channels, the overlay has additional alpha plane, like INPUT is yuv420p, and the LOGO is yuva420p. +@example +-i INPUT -i LOGO -filter_complex "[0:v]hwupload[a], [1:v]format=yuva420p, hwupload[b], [a][b]overlay_opencl, hwdownload" OUTPUT +@end example + +@end itemize + +@section prewitt_opencl + +Apply the Prewitt operator (@url{https://en.wikipedia.org/wiki/Prewitt_operator}) to input video stream. + +The filter accepts the following option: + +@table @option +@item planes +Set which planes to filter. Default value is @code{0xf}, by which all planes are processed. + +@item scale +Set value which will be multiplied with filtered result. +Range is @code{[0.0, 65535]} and default value is @code{1.0}. + +@item delta +Set value which will be added to filtered result. +Range is @code{[-65535, 65535]} and default value is @code{0.0}. +@end table + +@subsection Example + +@itemize +@item +Apply the Prewitt operator with scale set to 2 and delta set to 10. 
+@example +-i INPUT -vf "hwupload, prewitt_opencl=scale=2:delta=10, hwdownload" OUTPUT +@end example +@end itemize + +@section roberts_opencl +Apply the Roberts cross operator (@url{https://en.wikipedia.org/wiki/Roberts_cross}) to input video stream. + +The filter accepts the following option: + +@table @option +@item planes +Set which planes to filter. Default value is @code{0xf}, by which all planes are processed. + +@item scale +Set value which will be multiplied with filtered result. +Range is @code{[0.0, 65535]} and default value is @code{1.0}. + +@item delta +Set value which will be added to filtered result. +Range is @code{[-65535, 65535]} and default value is @code{0.0}. +@end table + +@subsection Example + +@itemize +@item +Apply the Roberts cross operator with scale set to 2 and delta set to 10 +@example +-i INPUT -vf "hwupload, roberts_opencl=scale=2:delta=10, hwdownload" OUTPUT +@end example +@end itemize + +@section sobel_opencl + +Apply the Sobel operator (@url{https://en.wikipedia.org/wiki/Sobel_operator}) to input video stream. + +The filter accepts the following option: + +@table @option +@item planes +Set which planes to filter. Default value is @code{0xf}, by which all planes are processed. + +@item scale +Set value which will be multiplied with filtered result. +Range is @code{[0.0, 65535]} and default value is @code{1.0}. + +@item delta +Set value which will be added to filtered result. +Range is @code{[-65535, 65535]} and default value is @code{0.0}. +@end table + +@subsection Example + +@itemize +@item +Apply sobel operator with scale set to 2 and delta set to 10 +@example +-i INPUT -vf "hwupload, sobel_opencl=scale=2:delta=10, hwdownload" OUTPUT +@end example +@end itemize + +@section tonemap_opencl + +Perform HDR(PQ/HLG) to SDR conversion with tone-mapping. + +It accepts the following parameters: + +@table @option +@item tonemap +Specify the tone-mapping operator to be used. Same as tonemap option in @ref{tonemap}. 
+ +@item param +Tune the tone mapping algorithm. Same as param option in @ref{tonemap}. + +@item desat +Apply desaturation for highlights that exceed this level of brightness. The +higher the parameter, the more color information will be preserved. This +setting helps prevent unnaturally blown-out colors for super-highlights, by +(smoothly) turning into white instead. This makes images feel more natural, +at the cost of reducing information about out-of-range colors. + +The default value is 0.5, and the algorithm here is a little different from +the cpu version tonemap currently. A setting of 0.0 disables this option. + +@item threshold +The tonemapping algorithm parameters are fine-tuned for each scene. And a threshold +is used to detect whether the scene has changed or not. If the distance between +the current frame average brightness and the current running average exceeds +a threshold value, we would re-calculate scene average and peak brightness. +The default value is 0.2. + +@item format +Specify the output pixel format. + +Currently supported formats are: +@table @var +@item p010 +@item nv12 +@end table + +@item range, r +Set the output color range. + +Possible values are: +@table @var +@item tv/mpeg +@item pc/jpeg +@end table + +Default is same as input. + +@item primaries, p +Set the output color primaries. + +Possible values are: +@table @var +@item bt709 +@item bt2020 +@end table + +Default is same as input. + +@item transfer, t +Set the output transfer characteristics. + +Possible values are: +@table @var +@item bt709 +@item bt2020 +@end table + +Default is bt709. + +@item matrix, m +Set the output colorspace matrix. + +Possible values are: +@table @var +@item bt709 +@item bt2020 +@end table + +Default is same as input. + +@end table + +@subsection Example + +@itemize +@item +Convert HDR(PQ/HLG) video to bt2020-transfer-characteristic p010 format using linear operator. 
+@example +-i INPUT -vf "format=p010,hwupload,tonemap_opencl=t=bt2020:tonemap=linear:format=p010,hwdownload,format=p010" OUTPUT +@end example +@end itemize + +@section unsharp_opencl + +Sharpen or blur the input video. + +It accepts the following parameters: + +@table @option +@item luma_msize_x, lx +Set the luma matrix horizontal size. +Range is @code{[1, 23]} and default value is @code{5}. + +@item luma_msize_y, ly +Set the luma matrix vertical size. +Range is @code{[1, 23]} and default value is @code{5}. + +@item luma_amount, la +Set the luma effect strength. +Range is @code{[-10, 10]} and default value is @code{1.0}. + +Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. + +@item chroma_msize_x, cx +Set the chroma matrix horizontal size. +Range is @code{[1, 23]} and default value is @code{5}. + +@item chroma_msize_y, cy +Set the chroma matrix vertical size. +Range is @code{[1, 23]} and default value is @code{5}. + +@item chroma_amount, ca +Set the chroma effect strength. +Range is @code{[-10, 10]} and default value is @code{0.0}. + +Negative values will blur the input video, while positive values will +sharpen it, a value of zero will disable the effect. + +@end table + +All parameters are optional and default to the equivalent of the +string '5:5:1.0:5:5:0.0'. + +@subsection Examples + +@itemize +@item +Apply strong luma sharpen effect: +@example +-i INPUT -vf "hwupload, unsharp_opencl=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5, hwdownload" OUTPUT +@end example + +@item +Apply a strong blur of both luma and chroma parameters: +@example +-i INPUT -vf "hwupload, unsharp_opencl=7:7:-2:7:7:-2, hwdownload" OUTPUT +@end example +@end itemize + +@c man end OPENCL VIDEO FILTERS + @chapter Video Sources @c man begin VIDEO SOURCES @@ -20628,6 +21625,7 @@ It accepts the following values: @item cauchy @item parzen @item poisson +@item bohman @end table Default is @code{hanning}. 
@@ -20730,6 +21728,14 @@ each channel is displayed using the cool color scheme each channel is displayed using the magma color scheme @item green each channel is displayed using the green color scheme +@item viridis +each channel is displayed using the viridis color scheme +@item plasma +each channel is displayed using the plasma color scheme +@item cividis +each channel is displayed using the cividis color scheme +@item terrain +each channel is displayed using the terrain color scheme @end table Default value is @samp{channel}. @@ -20786,6 +21792,7 @@ It accepts the following values: @item cauchy @item parzen @item poisson +@item bohman @end table Default value is @code{hann}. @@ -20895,6 +21902,14 @@ each channel is displayed using the cool color scheme each channel is displayed using the magma color scheme @item green each channel is displayed using the green color scheme +@item viridis +each channel is displayed using the viridis color scheme +@item plasma +each channel is displayed using the plasma color scheme +@item cividis +each channel is displayed using the cividis color scheme +@item terrain +each channel is displayed using the terrain color scheme @end table Default value is @samp{intensity}. @@ -20949,6 +21964,7 @@ It accepts the following values: @item cauchy @item parzen @item poisson +@item bohman @end table Default value is @code{hann}. 
diff --git a/doc/general.texi b/doc/general.texi index 4983134f7e0ca..5256e0653d2c4 100644 --- a/doc/general.texi +++ b/doc/general.texi @@ -545,6 +545,7 @@ library: @item raw VC-1 @tab X @tab X @item raw PCM A-law @tab X @tab X @item raw PCM mu-law @tab X @tab X +@item raw PCM Archimedes VIDC @tab X @tab X @item raw PCM signed 8 bit @tab X @tab X @item raw PCM signed 16 bit big-endian @tab X @tab X @item raw PCM signed 16 bit little-endian @tab X @tab X @@ -757,8 +758,8 @@ following image formats are supported: @item Autodesk Animator Flic video @tab @tab X @item Autodesk RLE @tab @tab X @tab fourcc: AASC -@item AV1 @tab @tab E - @tab Supported through external library libaom +@item AV1 @tab E @tab E + @tab Supported through external libraries libaom and libdav1d @item Avid 1:1 10-bit RGB Packer @tab X @tab X @tab fourcc: AVrp @item AVS (Audio Video Standard) video @tab @tab X @@ -1147,6 +1148,7 @@ following image formats are supported: @tab encoding supported through external library libopus @item PCM A-law @tab X @tab X @item PCM mu-law @tab X @tab X +@item PCM Archimedes VIDC @tab X @tab X @item PCM signed 8-bit planar @tab X @tab X @item PCM signed 16-bit big-endian planar @tab X @tab X @item PCM signed 16-bit little-endian planar @tab X @tab X diff --git a/doc/indevs.texi b/doc/indevs.texi index 9a9cb697d35f8..e1301ccf9743b 100644 --- a/doc/indevs.texi +++ b/doc/indevs.texi @@ -1078,6 +1078,10 @@ Defaults to @option{0.5}. When this flag is @option{false}, all video that you receive will be progressive. Defaults to @option{true}. +@item extra_ips +If is set to list of comma separated ip addresses, scan for sources not only +using mDNS but also use unicast ip addresses specified by this list. 
+ @end table @subsection Examples @@ -1090,12 +1094,25 @@ List input devices: ffmpeg -f libndi_newtek -find_sources 1 -i dummy @end example +@item +List local and remote input devices: +@example +ffmpeg -f libndi_newtek -extra_ips "192.168.10.10" -find_sources 1 -i dummy +@end example + @item Restream to NDI: @example ffmpeg -f libndi_newtek -i "DEV-5.INTERNAL.M1STEREO.TV (NDI_SOURCE_NAME_1)" -f libndi_newtek -y NDI_SOURCE_NAME_2 @end example +@item +Restream remote NDI to local NDI: +@example +ffmpeg -f libndi_newtek -extra_ips "192.168.10.10" -i "DEV-5.REMOTE.M1STEREO.TV (NDI_SOURCE_NAME_1)" -f libndi_newtek -y NDI_SOURCE_NAME_2 +@end example + + @end itemize @section openal diff --git a/doc/muxers.texi b/doc/muxers.texi index f18543e83d842..62f4091e31ea5 100644 --- a/doc/muxers.texi +++ b/doc/muxers.texi @@ -764,17 +764,17 @@ Possible values: @table @samp @item mpegts -If this flag is set, the hls segment files will format to mpegts. -the mpegts files is used in all hls versions. +Output segment files in MPEG-2 Transport Stream format. This is +compatible with all HLS versions. @item fmp4 -If this flag is set, the hls segment files will format to fragment mp4 looks like dash. -the fmp4 files is used in hls after version 7. +Output segment files in fragmented MP4 format, similar to MPEG-DASH. +fmp4 files may be used in HLS version 7 and above. @end table @item hls_fmp4_init_filename @var{filename} -set filename to the fragment files header file, default filename is @file{init.mp4}. +Set filename to the fragment files header file, default filename is @file{init.mp4}. 
When @code{var_stream_map} is set with two or more variant streams, the @var{filename} pattern must contain the string "%v", this string specifies diff --git a/doc/outdevs.texi b/doc/outdevs.texi index 2518f9b55950e..7509ac695db27 100644 --- a/doc/outdevs.texi +++ b/doc/outdevs.texi @@ -398,6 +398,10 @@ Set the SDL window size, can be a string of the form If not specified it defaults to the size of the input video, downscaled according to the aspect ratio. +@item window_x +@item window_y +Set the position of the window on the screen. + @item window_fullscreen Set fullscreen mode when non-zero value is provided. Default value is zero. diff --git a/doc/protocols.texi b/doc/protocols.texi index b34f29eebf9aa..fb7725e058c70 100644 --- a/doc/protocols.texi +++ b/doc/protocols.texi @@ -1306,10 +1306,10 @@ set by the peer side. Before version 1.3.0 this option is only available as @option{latency}. @item recv_buffer_size=@var{bytes} -Set receive buffer size, expressed in bytes. +Set UDP receive buffer size, expressed in bytes. @item send_buffer_size=@var{bytes} -Set send buffer size, expressed in bytes. +Set UDP send buffer size, expressed in bytes. @item rw_timeout Set raise error timeout for read/write optations. @@ -1329,6 +1329,87 @@ have no chance of being delivered in time. It was automatically enabled in the sender if the receiver supports it. +@item sndbuf=@var{bytes} +Set send buffer size, expressed in bytes. + +@item rcvbuf=@var{bytes} +Set receive buffer size, expressed in bytes. + +Receive buffer must not be greater than @option{ffs}. + +@item lossmaxttl=@var{packets} +The value up to which the Reorder Tolerance may grow. When +Reorder Tolerance is > 0, then packet loss report is delayed +until that number of packets come in. 
Reorder Tolerance +increases every time a "belated" packet has come, but it +wasn't due to retransmission (that is, when UDP packets tend +to come out of order), with the difference between the latest +sequence and this packet's sequence, and not more than the +value of this option. By default it's 0, which means that this +mechanism is turned off, and the loss report is always sent +immediately upon experiencing a "gap" in sequences. + +@item minversion +The minimum SRT version that is required from the peer. A connection +to a peer that does not satisfy the minimum version requirement +will be rejected. + +The version format in hex is 0xXXYYZZ for x.y.z in human readable +form. + +@item streamid=@var{string} +A string limited to 512 characters that can be set on the socket prior +to connecting. This stream ID will be able to be retrieved by the +listener side from the socket that is returned from srt_accept and +was connected by a socket with that set stream ID. SRT does not enforce +any special interpretation of the contents of this string. +This option doesn’t make sense in Rendezvous connection; the result +might be that simply one side will override the value from the other +side and it’s the matter of luck which one would win + +@item smoother=@var{live|file} +The type of Smoother used for the transmission for that socket, which +is responsible for the transmission and congestion control. The Smoother +type must be exactly the same on both connecting parties, otherwise +the connection is rejected. + +@item messageapi=@var{1|0} +When set, this socket uses the Message API, otherwise it uses Buffer +API. Note that in live mode (see @option{transtype}) there’s only +message API available. In File mode you can choose to use one of two modes: + +Stream API (default, when this option is false). In this mode you may +send as many data as you wish with one sending instruction, or even use +dedicated functions that read directly from a file. 
The internal facility +will take care of any speed and congestion control. When receiving, you +can also receive as many data as desired, the data not extracted will be +waiting for the next call. There is no boundary between data portions in +the Stream mode. + +Message API. In this mode your single sending instruction passes exactly +one piece of data that has boundaries (a message). Contrary to Live mode, +this message may span across multiple UDP packets and the only size +limitation is that it shall fit as a whole in the sending buffer. The +receiver shall use as large buffer as necessary to receive the message, +otherwise the message will not be given up. When the message is not +complete (not all packets received or there was a packet loss) it will +not be given up. + +@item transtype=@var{live|file} +Sets the transmission type for the socket, in particular, setting this +option sets multiple other parameters to their default values as required +for a particular transmission type. + +live: Set options as for live transmission. In this mode, you should +send by one sending instruction only so many data that fit in one UDP packet, +and limited to the value defined first in @option{payload_size} (1316 is +default in this mode). There is no speed control in this mode, only the +bandwidth control, if configured, in order to not exceed the bandwidth with +the overhead transmission (retransmitted and control packets). + +file: Set options as for non-live transmission. See @option{messageapi} +for further explanations + @end table For more information see: @url{https://github.com/Haivision/srt}. 
diff --git a/libavcodec/Makefile b/libavcodec/Makefile index a97055ef3fa6a..8643da8f2be54 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -794,6 +794,8 @@ OBJS-$(CONFIG_PCM_U32BE_DECODER) += pcm.o OBJS-$(CONFIG_PCM_U32BE_ENCODER) += pcm.o OBJS-$(CONFIG_PCM_U32LE_DECODER) += pcm.o OBJS-$(CONFIG_PCM_U32LE_ENCODER) += pcm.o +OBJS-$(CONFIG_PCM_VIDC_DECODER) += pcm.o +OBJS-$(CONFIG_PCM_VIDC_ENCODER) += pcm.o OBJS-$(CONFIG_PCM_ZORK_DECODER) += pcm.o OBJS-$(CONFIG_ADPCM_4XM_DECODER) += adpcm.o adpcm_data.o @@ -954,6 +956,7 @@ OBJS-$(CONFIG_LIBAOM_AV1_ENCODER) += libaomenc.o OBJS-$(CONFIG_LIBCELT_DECODER) += libcelt_dec.o OBJS-$(CONFIG_LIBCODEC2_DECODER) += libcodec2.o codec2utils.o OBJS-$(CONFIG_LIBCODEC2_ENCODER) += libcodec2.o codec2utils.o +OBJS-$(CONFIG_LIBDAV1D_DECODER) += libdav1d.o OBJS-$(CONFIG_LIBDAVS2_DECODER) += libdavs2.o OBJS-$(CONFIG_LIBFDK_AAC_DECODER) += libfdk-aacdec.o OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index c0b4d56d0d475..2c17db5a70c34 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -552,6 +552,8 @@ extern AVCodec ff_pcm_u32be_encoder; extern AVCodec ff_pcm_u32be_decoder; extern AVCodec ff_pcm_u32le_encoder; extern AVCodec ff_pcm_u32le_decoder; +extern AVCodec ff_pcm_vidc_encoder; +extern AVCodec ff_pcm_vidc_decoder; extern AVCodec ff_pcm_zork_decoder; /* DPCM codecs */ @@ -674,6 +676,7 @@ extern AVCodec ff_libaom_av1_encoder; extern AVCodec ff_libcelt_decoder; extern AVCodec ff_libcodec2_encoder; extern AVCodec ff_libcodec2_decoder; +extern AVCodec ff_libdav1d_decoder; extern AVCodec ff_libdavs2_decoder; extern AVCodec ff_libfdk_aac_encoder; extern AVCodec ff_libfdk_aac_decoder; diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 705a3ce4f3ef0..3922e89331f07 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h @@ -491,6 +491,7 @@ enum AVCodecID { AV_CODEC_ID_PCM_S64BE, AV_CODEC_ID_PCM_F16LE, AV_CODEC_ID_PCM_F24LE, + 
AV_CODEC_ID_PCM_VIDC, /* various ADPCM codecs */ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, @@ -1070,6 +1071,13 @@ typedef struct RcOverride{ */ #define AV_CODEC_CAP_HYBRID (1 << 19) +/** + * This codec takes the reordered_opaque field from input AVFrames + * and returns it in the corresponding field in AVCodecContext after + * encoding. + */ +#define AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE (1 << 20) + /** * Pan Scan area. * This specifies the area which should be displayed. @@ -2676,7 +2684,10 @@ typedef struct AVCodecContext { /** * opaque 64-bit number (generally a PTS) that will be reordered and * output in AVFrame.reordered_opaque - * - encoding: unused + * - encoding: Set by libavcodec to the reordered_opaque of the input + * frame corresponding to the last returned packet. Only + * supported by encoders with the + * AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE capability. * - decoding: Set by user. */ int64_t reordered_opaque; @@ -2960,6 +2971,13 @@ typedef struct AVCodecContext { #define FF_PROFILE_SBC_MSBC 1 +#define FF_PROFILE_PRORES_PROXY 0 +#define FF_PROFILE_PRORES_LT 1 +#define FF_PROFILE_PRORES_STANDARD 2 +#define FF_PROFILE_PRORES_HQ 3 +#define FF_PROFILE_PRORES_4444 4 +#define FF_PROFILE_PRORES_XQ 5 + /** * level * - encoding: Set by user. 
diff --git a/libavcodec/cavsdec.c b/libavcodec/cavsdec.c index c7fff67c06c66..5f3b354518e64 100644 --- a/libavcodec/cavsdec.c +++ b/libavcodec/cavsdec.c @@ -591,14 +591,21 @@ static int decode_residual_block(AVSContext *h, GetBitContext *gb, } -static inline void decode_residual_chroma(AVSContext *h) +static inline int decode_residual_chroma(AVSContext *h) { - if (h->cbp & (1 << 4)) - decode_residual_block(h, &h->gb, chroma_dec, 0, + if (h->cbp & (1 << 4)) { + int ret = decode_residual_block(h, &h->gb, chroma_dec, 0, ff_cavs_chroma_qp[h->qp], h->cu, h->c_stride); - if (h->cbp & (1 << 5)) - decode_residual_block(h, &h->gb, chroma_dec, 0, + if (ret < 0) + return ret; + } + if (h->cbp & (1 << 5)) { + int ret = decode_residual_block(h, &h->gb, chroma_dec, 0, ff_cavs_chroma_qp[h->qp], h->cv, h->c_stride); + if (ret < 0) + return ret; + } + return 0; } static inline int decode_residual_inter(AVSContext *h) @@ -649,6 +656,7 @@ static int decode_mb_i(AVSContext *h, int cbp_code) uint8_t top[18]; uint8_t *left = NULL; uint8_t *d; + int ret; ff_cavs_init_mb(h); @@ -692,8 +700,11 @@ static int decode_mb_i(AVSContext *h, int cbp_code) ff_cavs_load_intra_pred_luma(h, top, &left, block); h->intra_pred_l[h->pred_mode_Y[scan3x3[block]]] (d, top, left, h->l_stride); - if (h->cbp & (1<qp, d, h->l_stride); + if (h->cbp & (1<qp, d, h->l_stride); + if (ret < 0) + return ret; + } } /* chroma intra prediction */ @@ -703,7 +714,9 @@ static int decode_mb_i(AVSContext *h, int cbp_code) h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx * 10], h->left_border_v, h->c_stride); - decode_residual_chroma(h); + ret = decode_residual_chroma(h); + if (ret < 0) + return ret; ff_cavs_filter(h, I_8X8); set_mv_intra(h); return 0; diff --git a/libavcodec/cbs_av1.c b/libavcodec/cbs_av1.c index 9bac9dde0912a..ff32a6fca595b 100644 --- a/libavcodec/cbs_av1.c +++ b/libavcodec/cbs_av1.c @@ -996,7 +996,10 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx, case AV1_OBU_REDUNDANT_FRAME_HEADER: 
{ err = cbs_av1_read_frame_header_obu(ctx, &gbc, - &obu->obu.frame_header); + &obu->obu.frame_header, + obu->header.obu_type == + AV1_OBU_REDUNDANT_FRAME_HEADER, + unit->data_ref); if (err < 0) return err; } @@ -1016,7 +1019,8 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx, break; case AV1_OBU_FRAME: { - err = cbs_av1_read_frame_obu(ctx, &gbc, &obu->obu.frame); + err = cbs_av1_read_frame_obu(ctx, &gbc, &obu->obu.frame, + unit->data_ref); if (err < 0) return err; @@ -1124,7 +1128,10 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx, case AV1_OBU_REDUNDANT_FRAME_HEADER: { err = cbs_av1_write_frame_header_obu(ctx, pbc, - &obu->obu.frame_header); + &obu->obu.frame_header, + obu->header.obu_type == + AV1_OBU_REDUNDANT_FRAME_HEADER, + NULL); if (err < 0) return err; } @@ -1141,7 +1148,7 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx, break; case AV1_OBU_FRAME: { - err = cbs_av1_write_frame_obu(ctx, pbc, &obu->obu.frame); + err = cbs_av1_write_frame_obu(ctx, pbc, &obu->obu.frame, NULL); if (err < 0) return err; @@ -1179,7 +1186,7 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx, if (err < 0) return err; end_pos = put_bits_count(pbc); - obu->obu_size = (end_pos - start_pos + 7) / 8; + obu->obu_size = header_size = (end_pos - start_pos + 7) / 8; } else { // Empty OBU. 
obu->obu_size = 0; @@ -1302,6 +1309,7 @@ static void cbs_av1_close(CodedBitstreamContext *ctx) CodedBitstreamAV1Context *priv = ctx->priv_data; av_buffer_unref(&priv->sequence_header_ref); + av_buffer_unref(&priv->frame_header_ref); av_freep(&priv->write_buffer); } diff --git a/libavcodec/cbs_av1.h b/libavcodec/cbs_av1.h index 0d7fd761f1ead..f662265f755f0 100644 --- a/libavcodec/cbs_av1.h +++ b/libavcodec/cbs_av1.h @@ -87,8 +87,8 @@ typedef struct AV1RawSequenceHeader { uint8_t seq_level_idx[AV1_MAX_OPERATING_POINTS]; uint8_t seq_tier[AV1_MAX_OPERATING_POINTS]; uint8_t decoder_model_present_for_this_op[AV1_MAX_OPERATING_POINTS]; - uint8_t decoder_buffer_delay[AV1_MAX_OPERATING_POINTS]; - uint8_t encoder_buffer_delay[AV1_MAX_OPERATING_POINTS]; + uint32_t decoder_buffer_delay[AV1_MAX_OPERATING_POINTS]; + uint32_t encoder_buffer_delay[AV1_MAX_OPERATING_POINTS]; uint8_t low_delay_mode_flag[AV1_MAX_OPERATING_POINTS]; uint8_t initial_display_delay_present_for_this_op[AV1_MAX_OPERATING_POINTS]; uint8_t initial_display_delay_minus_1[AV1_MAX_OPERATING_POINTS]; @@ -161,7 +161,7 @@ typedef struct AV1RawFrameHeader { uint8_t render_width_minus_1; uint8_t render_height_minus_1; - uint8_t found_ref; + uint8_t found_ref[AV1_REFS_PER_FRAME]; uint8_t refresh_frame_flags; uint8_t allow_intrabc; @@ -399,7 +399,10 @@ typedef struct CodedBitstreamAV1Context { AV1RawSequenceHeader *sequence_header; AVBufferRef *sequence_header_ref; - int seen_frame_header; + int seen_frame_header; + AVBufferRef *frame_header_ref; + uint8_t *frame_header; + size_t frame_header_size; int temporal_id; int spatial_id; diff --git a/libavcodec/cbs_av1_syntax_template.c b/libavcodec/cbs_av1_syntax_template.c index 84ab2973ab072..0da79b615d8c8 100644 --- a/libavcodec/cbs_av1_syntax_template.c +++ b/libavcodec/cbs_av1_syntax_template.c @@ -417,8 +417,8 @@ static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw, int i, err; for (i = 0; i < AV1_REFS_PER_FRAME; i++) { - flag(found_ref); - if 
(current->found_ref) { + flags(found_ref[i], 1, i); + if (current->found_ref[i]) { AV1ReferenceFrameState *ref = &priv->ref[current->ref_frame_idx[i]]; @@ -439,7 +439,7 @@ static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw, } } - if (current->found_ref == 0) { + if (i >= AV1_REFS_PER_FRAME) { CHECK(FUNC(frame_size)(ctx, rw, current)); CHECK(FUNC(render_size)(ctx, rw, current)); } else { @@ -1463,24 +1463,90 @@ static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw, } static int FUNC(frame_header_obu)(CodedBitstreamContext *ctx, RWContext *rw, - AV1RawFrameHeader *current) + AV1RawFrameHeader *current, int redundant, + AVBufferRef *rw_buffer_ref) { CodedBitstreamAV1Context *priv = ctx->priv_data; - int err; - - HEADER("Frame Header"); + int start_pos, fh_bits, fh_bytes, err; + uint8_t *fh_start; if (priv->seen_frame_header) { - // Nothing to do. + if (!redundant) { + av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid repeated " + "frame header OBU.\n"); + return AVERROR_INVALIDDATA; + } else { + GetBitContext fh; + size_t i, b; + uint32_t val; + + HEADER("Redundant Frame Header"); + + av_assert0(priv->frame_header_ref && priv->frame_header); + + init_get_bits(&fh, priv->frame_header, + priv->frame_header_size); + for (i = 0; i < priv->frame_header_size; i += 8) { + b = FFMIN(priv->frame_header_size - i, 8); + val = get_bits(&fh, b); + xf(b, frame_header_copy[i], + val, val, val, 1, i / 8); + } + } } else { + if (redundant) + HEADER("Redundant Frame Header (used as Frame Header)"); + else + HEADER("Frame Header"); + priv->seen_frame_header = 1; +#ifdef READ + start_pos = get_bits_count(rw); +#else + start_pos = put_bits_count(rw); +#endif + CHECK(FUNC(uncompressed_header)(ctx, rw, current)); if (current->show_existing_frame) { priv->seen_frame_header = 0; } else { priv->seen_frame_header = 1; + + av_buffer_unref(&priv->frame_header_ref); + +#ifdef READ + fh_bits = get_bits_count(rw) - start_pos; + fh_start = 
(uint8_t*)rw->buffer + start_pos / 8; +#else + // Need to flush the bitwriter so that we can copy its output, + // but use a copy so we don't affect the caller's structure. + { + PutBitContext tmp = *rw; + flush_put_bits(&tmp); + } + + fh_bits = put_bits_count(rw) - start_pos; + fh_start = rw->buf + start_pos / 8; +#endif + fh_bytes = (fh_bits + 7) / 8; + + priv->frame_header_size = fh_bits; + + if (rw_buffer_ref) { + priv->frame_header_ref = av_buffer_ref(rw_buffer_ref); + if (!priv->frame_header_ref) + return AVERROR(ENOMEM); + priv->frame_header = fh_start; + } else { + priv->frame_header_ref = + av_buffer_alloc(fh_bytes + AV_INPUT_BUFFER_PADDING_SIZE); + if (!priv->frame_header_ref) + return AVERROR(ENOMEM); + priv->frame_header = priv->frame_header_ref->data; + memcpy(priv->frame_header, fh_start, fh_bytes); + } } } @@ -1524,11 +1590,13 @@ static int FUNC(tile_group_obu)(CodedBitstreamContext *ctx, RWContext *rw, } static int FUNC(frame_obu)(CodedBitstreamContext *ctx, RWContext *rw, - AV1RawFrame *current) + AV1RawFrame *current, + AVBufferRef *rw_buffer_ref) { int err; - CHECK(FUNC(frame_header_obu)(ctx, rw, ¤t->header)); + CHECK(FUNC(frame_header_obu)(ctx, rw, ¤t->header, + 0, rw_buffer_ref)); CHECK(FUNC(byte_alignment)(ctx, rw)); diff --git a/libavcodec/cbs_h2645.c b/libavcodec/cbs_h2645.c index 4b31601c0ff53..e55bd00183183 100644 --- a/libavcodec/cbs_h2645.c +++ b/libavcodec/cbs_h2645.c @@ -319,7 +319,8 @@ static int cbs_h2645_read_more_rbsp_data(GetBitContext *gbc) #define byte_alignment(rw) (get_bits_count(rw) % 8) #define allocate(name, size) do { \ - name ## _ref = av_buffer_allocz(size); \ + name ## _ref = av_buffer_allocz(size + \ + AV_INPUT_BUFFER_PADDING_SIZE); \ if (!name ## _ref) \ return AVERROR(ENOMEM); \ name = name ## _ref->data; \ @@ -872,7 +873,21 @@ static int cbs_h264_read_nal_unit(CodedBitstreamContext *ctx, break; case H264_NAL_END_SEQUENCE: - return 0; + case H264_NAL_END_STREAM: + { + err = ff_cbs_alloc_unit_content(ctx, unit, + 
sizeof(H264RawNALUnitHeader), + NULL); + if (err < 0) + return err; + + err = (unit->type == H264_NAL_END_SEQUENCE ? + cbs_h264_read_end_of_sequence : + cbs_h264_read_end_of_stream)(ctx, &gbc, unit->content); + if (err < 0) + return err; + } + break; default: return AVERROR(ENOSYS); @@ -1147,6 +1162,22 @@ static int cbs_h264_write_nal_unit(CodedBitstreamContext *ctx, } break; + case H264_NAL_END_SEQUENCE: + { + err = cbs_h264_write_end_of_sequence(ctx, pbc, unit->content); + if (err < 0) + return err; + } + break; + + case H264_NAL_END_STREAM: + { + err = cbs_h264_write_end_of_stream(ctx, pbc, unit->content); + if (err < 0) + return err; + } + break; + default: av_log(ctx->log_ctx, AV_LOG_ERROR, "Write unimplemented for " "NAL unit type %"PRIu32".\n", unit->type); diff --git a/libavcodec/cbs_h264_syntax_template.c b/libavcodec/cbs_h264_syntax_template.c index 1c8d7d5eae2be..4da4c5da67360 100644 --- a/libavcodec/cbs_h264_syntax_template.c +++ b/libavcodec/cbs_h264_syntax_template.c @@ -513,6 +513,8 @@ static int FUNC(sei_buffering_period)(CodedBitstreamContext *ctx, RWContext *rw, const H264RawSPS *sps; int err, i, length; + HEADER("Buffering Period"); + ue(seq_parameter_set_id, 0, 31); sps = h264->sps[current->seq_parameter_set_id]; @@ -551,10 +553,9 @@ static int FUNC(sei_buffering_period)(CodedBitstreamContext *ctx, RWContext *rw, } static int FUNC(sei_pic_timestamp)(CodedBitstreamContext *ctx, RWContext *rw, - H264RawSEIPicTimestamp *current) + H264RawSEIPicTimestamp *current, + const H264RawSPS *sps) { - CodedBitstreamH264Context *h264 = ctx->priv_data; - const H264RawSPS *sps; uint8_t time_offset_length; int err; @@ -583,13 +584,6 @@ static int FUNC(sei_pic_timestamp)(CodedBitstreamContext *ctx, RWContext *rw, } } - sps = h264->active_sps; - if (!sps) { - av_log(ctx->log_ctx, AV_LOG_ERROR, - "No active SPS for pic_timestamp.\n"); - return AVERROR_INVALIDDATA; - } - if (sps->vui.nal_hrd_parameters_present_flag) time_offset_length = 
sps->vui.nal_hrd_parameters.time_offset_length; else if (sps->vui.vcl_hrd_parameters_present_flag) @@ -613,6 +607,8 @@ static int FUNC(sei_pic_timing)(CodedBitstreamContext *ctx, RWContext *rw, const H264RawSPS *sps; int err; + HEADER("Picture Timing"); + sps = h264->active_sps; if (!sps) { // If there is exactly one possible SPS but it is not yet active @@ -669,7 +665,8 @@ static int FUNC(sei_pic_timing)(CodedBitstreamContext *ctx, RWContext *rw, for (i = 0; i < num_clock_ts[current->pic_struct]; i++) { flags(clock_timestamp_flag[i], 1, i); if (current->clock_timestamp_flag[i]) - CHECK(FUNC(sei_pic_timestamp)(ctx, rw, ¤t->timestamp[i])); + CHECK(FUNC(sei_pic_timestamp)(ctx, rw, + ¤t->timestamp[i], sps)); } } @@ -681,6 +678,8 @@ static int FUNC(sei_pan_scan_rect)(CodedBitstreamContext *ctx, RWContext *rw, { int err, i; + HEADER("Pan-Scan Rectangle"); + ue(pan_scan_rect_id, 0, UINT32_MAX - 1); flag(pan_scan_rect_cancel_flag); @@ -706,6 +705,8 @@ static int FUNC(sei_user_data_registered)(CodedBitstreamContext *ctx, RWContext { int err, i, j; + HEADER("User Data Registered ITU-T T.35"); + u(8, itu_t_t35_country_code, 0x00, 0xff); if (current->itu_t_t35_country_code != 0xff) i = 1; @@ -725,7 +726,7 @@ static int FUNC(sei_user_data_registered)(CodedBitstreamContext *ctx, RWContext *payload_size = i + current->data_length; #endif - allocate(current->data, current->data_length + AV_INPUT_BUFFER_PADDING_SIZE); + allocate(current->data, current->data_length); for (j = 0; j < current->data_length; j++) xu(8, itu_t_t35_payload_byte[i], current->data[j], 0x00, 0xff, 1, i + j); @@ -738,6 +739,8 @@ static int FUNC(sei_user_data_unregistered)(CodedBitstreamContext *ctx, RWContex { int err, i; + HEADER("User Data Unregistered"); + #ifdef READ if (*payload_size < 16) { av_log(ctx->log_ctx, AV_LOG_ERROR, @@ -765,6 +768,8 @@ static int FUNC(sei_recovery_point)(CodedBitstreamContext *ctx, RWContext *rw, { int err; + HEADER("Recovery Point"); + ue(recovery_frame_cnt, 0, 65535); 
flag(exact_match_flag); flag(broken_link_flag); @@ -778,6 +783,8 @@ static int FUNC(sei_display_orientation)(CodedBitstreamContext *ctx, RWContext * { int err; + HEADER("Display Orientation"); + flag(display_orientation_cancel_flag); if (!current->display_orientation_cancel_flag) { flag(hor_flip); @@ -795,6 +802,8 @@ static int FUNC(sei_mastering_display_colour_volume)(CodedBitstreamContext *ctx, { int err, c; + HEADER("Mastering Display Colour Volume"); + for (c = 0; c < 3; c++) { us(16, display_primaries_x[c], 0, 50000, 1, c); us(16, display_primaries_y[c], 0, 50000, 1, c); @@ -1181,11 +1190,10 @@ static int FUNC(slice_header)(CodedBitstreamContext *ctx, RWContext *rw, "in the same access unit.\n"); return AVERROR_INVALIDDATA; } + idr_pic_flag = h264->last_slice_nal_unit_type == H264_NAL_IDR_SLICE; } else { - h264->last_slice_nal_unit_type = - current->nal_unit_header.nal_unit_type; + idr_pic_flag = current->nal_unit_header.nal_unit_type == H264_NAL_IDR_SLICE; } - idr_pic_flag = h264->last_slice_nal_unit_type == H264_NAL_IDR_SLICE; ue(first_mb_in_slice, 0, H264_MAX_MB_PIC_SIZE - 1); ue(slice_type, 0, 9); @@ -1263,6 +1271,13 @@ static int FUNC(slice_header)(CodedBitstreamContext *ctx, RWContext *rw, if (pps->redundant_pic_cnt_present_flag) ue(redundant_pic_cnt, 0, 127); + else + infer(redundant_pic_cnt, 0); + + if (current->nal_unit_header.nal_unit_type != H264_NAL_AUXILIARY_SLICE + && !current->redundant_pic_cnt) + h264->last_slice_nal_unit_type = + current->nal_unit_header.nal_unit_type; if (slice_type_b) flag(direct_spatial_mv_pred_flag); @@ -1375,3 +1390,21 @@ static int FUNC(filler)(CodedBitstreamContext *ctx, RWContext *rw, return 0; } + +static int FUNC(end_of_sequence)(CodedBitstreamContext *ctx, RWContext *rw, + H264RawNALUnitHeader *current) +{ + HEADER("End of Sequence"); + + return FUNC(nal_unit_header)(ctx, rw, current, + 1 << H264_NAL_END_SEQUENCE); +} + +static int FUNC(end_of_stream)(CodedBitstreamContext *ctx, RWContext *rw, + H264RawNALUnitHeader 
*current) +{ + HEADER("End of Stream"); + + return FUNC(nal_unit_header)(ctx, rw, current, + 1 << H264_NAL_END_STREAM); +} diff --git a/libavcodec/cbs_h265_syntax_template.c b/libavcodec/cbs_h265_syntax_template.c index d4e4f7b1c2459..e43f3caf99a85 100644 --- a/libavcodec/cbs_h265_syntax_template.c +++ b/libavcodec/cbs_h265_syntax_template.c @@ -130,6 +130,11 @@ static int FUNC(profile_tier_level)(CodedBitstreamContext *ctx, RWContext *rw, fixed(24, general_reserved_zero_34bits, 0); fixed(10, general_reserved_zero_34bits, 0); } + } else if (profile_compatible(2)) { + fixed(7, general_reserved_zero_7bits, 0); + flag(general_one_picture_only_constraint_flag); + fixed(24, general_reserved_zero_35bits, 0); + fixed(11, general_reserved_zero_35bits, 0); } else { fixed(24, general_reserved_zero_43bits, 0); fixed(19, general_reserved_zero_43bits, 0); diff --git a/libavcodec/cbs_vp9.c b/libavcodec/cbs_vp9.c index 7498be4b73b40..c03ce986c07e5 100644 --- a/libavcodec/cbs_vp9.c +++ b/libavcodec/cbs_vp9.c @@ -314,6 +314,12 @@ static int cbs_vp9_write_le(CodedBitstreamContext *ctx, PutBitContext *pbc, current->name = prob; \ } while (0) +#define fixed(width, name, value) do { \ + av_unused uint32_t fixed_value = value; \ + CHECK(ff_cbs_read_unsigned(ctx, rw, width, #name, \ + 0, &fixed_value, value, value)); \ + } while (0) + #define infer(name, value) do { \ current->name = value; \ } while (0) @@ -331,6 +337,7 @@ static int cbs_vp9_write_le(CodedBitstreamContext *ctx, PutBitContext *pbc, #undef fle #undef delta_q #undef prob +#undef fixed #undef infer #undef byte_alignment @@ -370,6 +377,11 @@ static int cbs_vp9_write_le(CodedBitstreamContext *ctx, PutBitContext *pbc, xf(8, name.prob, current->name, subs, __VA_ARGS__); \ } while (0) +#define fixed(width, name, value) do { \ + CHECK(ff_cbs_write_unsigned(ctx, rw, width, #name, \ + 0, value, value, value)); \ + } while (0) + #define infer(name, value) do { \ if (current->name != (value)) { \ av_log(ctx->log_ctx, AV_LOG_WARNING, 
"Warning: " \ @@ -392,6 +404,7 @@ static int cbs_vp9_write_le(CodedBitstreamContext *ctx, PutBitContext *pbc, #undef fle #undef delta_q #undef prob +#undef fixed #undef infer #undef byte_alignment diff --git a/libavcodec/cbs_vp9.h b/libavcodec/cbs_vp9.h index 5b99c90c2e68e..4c9b2f880d32e 100644 --- a/libavcodec/cbs_vp9.h +++ b/libavcodec/cbs_vp9.h @@ -84,7 +84,6 @@ typedef struct VP9RawFrameHeader { uint8_t frame_marker; uint8_t profile_low_bit; uint8_t profile_high_bit; - uint8_t profile_reserved_zero; uint8_t show_existing_frame; uint8_t frame_to_show_map_idx; @@ -99,7 +98,6 @@ typedef struct VP9RawFrameHeader { uint8_t color_range; uint8_t subsampling_x; uint8_t subsampling_y; - uint8_t color_config_reserved_zero; uint8_t refresh_frame_flags; @@ -183,8 +181,17 @@ typedef struct VP9RawSuperframe { VP9RawSuperframeIndex index; } VP9RawSuperframe; +typedef struct VP9ReferenceFrameState { + int frame_width; // RefFrameWidth + int frame_height; // RefFrameHeight + int subsampling_x; // RefSubsamplingX + int subsampling_y; // RefSubsamplingY + int bit_depth; // RefBitDepth +} VP9ReferenceFrameState; typedef struct CodedBitstreamVP9Context { + int profile; + // Frame dimensions in 8x8 mode info blocks. uint16_t mi_cols; uint16_t mi_rows; @@ -192,6 +199,15 @@ typedef struct CodedBitstreamVP9Context { uint16_t sb64_cols; uint16_t sb64_rows; + int frame_width; + int frame_height; + + uint8_t subsampling_x; + uint8_t subsampling_y; + int bit_depth; + + VP9ReferenceFrameState ref[VP9_NUM_REF_FRAMES]; + // Write buffer. 
uint8_t *write_buffer; size_t write_buffer_size; diff --git a/libavcodec/cbs_vp9_syntax_template.c b/libavcodec/cbs_vp9_syntax_template.c index 0db0f52a6dbaa..898cede329cfd 100644 --- a/libavcodec/cbs_vp9_syntax_template.c +++ b/libavcodec/cbs_vp9_syntax_template.c @@ -43,10 +43,14 @@ static int FUNC(frame_sync_code)(CodedBitstreamContext *ctx, RWContext *rw, static int FUNC(color_config)(CodedBitstreamContext *ctx, RWContext *rw, VP9RawFrameHeader *current, int profile) { + CodedBitstreamVP9Context *vp9 = ctx->priv_data; int err; - if (profile >= 2) + if (profile >= 2) { f(1, ten_or_twelve_bit); + vp9->bit_depth = current->ten_or_twelve_bit ? 12 : 10; + } else + vp9->bit_depth = 8; f(3, color_space); @@ -55,7 +59,7 @@ static int FUNC(color_config)(CodedBitstreamContext *ctx, RWContext *rw, if (profile == 1 || profile == 3) { f(1, subsampling_x); f(1, subsampling_y); - f(1, color_config_reserved_zero); + fixed(1, reserved_zero, 0); } else { infer(subsampling_x, 1); infer(subsampling_y, 1); @@ -65,9 +69,13 @@ static int FUNC(color_config)(CodedBitstreamContext *ctx, RWContext *rw, if (profile == 1 || profile == 3) { infer(subsampling_x, 0); infer(subsampling_y, 0); + fixed(1, reserved_zero, 0); } } + vp9->subsampling_x = current->subsampling_x; + vp9->subsampling_y = current->subsampling_y; + return 0; } @@ -80,8 +88,11 @@ static int FUNC(frame_size)(CodedBitstreamContext *ctx, RWContext *rw, f(16, frame_width_minus_1); f(16, frame_height_minus_1); - vp9->mi_cols = (current->frame_width_minus_1 + 8) >> 3; - vp9->mi_rows = (current->frame_height_minus_1 + 8) >> 3; + vp9->frame_width = current->frame_width_minus_1 + 1; + vp9->frame_height = current->frame_height_minus_1 + 1; + + vp9->mi_cols = (vp9->frame_width + 7) >> 3; + vp9->mi_rows = (vp9->frame_height + 7) >> 3; vp9->sb64_cols = (vp9->mi_cols + 7) >> 3; vp9->sb64_rows = (vp9->mi_rows + 7) >> 3; @@ -106,15 +117,33 @@ static int FUNC(render_size)(CodedBitstreamContext *ctx, RWContext *rw, static int 
FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw, VP9RawFrameHeader *current) { + CodedBitstreamVP9Context *vp9 = ctx->priv_data; int err, i; for (i = 0; i < VP9_REFS_PER_FRAME; i++) { fs(1, found_ref[i], 1, i); - if (current->found_ref[i]) + if (current->found_ref[i]) { + VP9ReferenceFrameState *ref = + &vp9->ref[current->ref_frame_idx[i]]; + + vp9->frame_width = ref->frame_width; + vp9->frame_height = ref->frame_height; + + vp9->subsampling_x = ref->subsampling_x; + vp9->subsampling_y = ref->subsampling_y; + vp9->bit_depth = ref->bit_depth; + break; + } } if (i >= VP9_REFS_PER_FRAME) CHECK(FUNC(frame_size)(ctx, rw, current)); + else { + vp9->mi_cols = (vp9->frame_width + 7) >> 3; + vp9->mi_rows = (vp9->frame_height + 7) >> 3; + vp9->sb64_cols = (vp9->mi_cols + 7) >> 3; + vp9->sb64_rows = (vp9->mi_rows + 7) >> 3; + } CHECK(FUNC(render_size)(ctx, rw, current)); return 0; @@ -248,16 +277,16 @@ static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw, static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw, VP9RawFrameHeader *current) { - int profile, i; - int err; + CodedBitstreamVP9Context *vp9 = ctx->priv_data; + int err, i; f(2, frame_marker); f(1, profile_low_bit); f(1, profile_high_bit); - profile = (current->profile_high_bit << 1) + current->profile_low_bit; - if (profile == 3) - f(1, profile_reserved_zero); + vp9->profile = (current->profile_high_bit << 1) + current->profile_low_bit; + if (vp9->profile == 3) + fixed(1, reserved_zero, 0); f(1, show_existing_frame); if (current->show_existing_frame) { @@ -274,7 +303,7 @@ static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw, if (current->frame_type == VP9_KEY_FRAME) { CHECK(FUNC(frame_sync_code)(ctx, rw, current)); - CHECK(FUNC(color_config)(ctx, rw, current, profile)); + CHECK(FUNC(color_config)(ctx, rw, current, vp9->profile)); CHECK(FUNC(frame_size)(ctx, rw, current)); CHECK(FUNC(render_size)(ctx, rw, current)); @@ -294,12 
+323,16 @@ static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw, if (current->intra_only == 1) { CHECK(FUNC(frame_sync_code)(ctx, rw, current)); - if (profile > 0) { - CHECK(FUNC(color_config)(ctx, rw, current, profile)); + if (vp9->profile > 0) { + CHECK(FUNC(color_config)(ctx, rw, current, vp9->profile)); } else { infer(color_space, 1); infer(subsampling_x, 1); infer(subsampling_y, 1); + vp9->bit_depth = 8; + + vp9->subsampling_x = current->subsampling_x; + vp9->subsampling_y = current->subsampling_y; } f(8, refresh_frame_flags); @@ -338,6 +371,25 @@ static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw, f(16, header_size_in_bytes); + for (i = 0; i < VP9_NUM_REF_FRAMES; i++) { + if (current->refresh_frame_flags & (1 << i)) { + vp9->ref[i] = (VP9ReferenceFrameState) { + .frame_width = vp9->frame_width, + .frame_height = vp9->frame_height, + .subsampling_x = vp9->subsampling_x, + .subsampling_y = vp9->subsampling_y, + .bit_depth = vp9->bit_depth, + }; + } + } + + av_log(ctx->log_ctx, AV_LOG_DEBUG, "Frame: size %dx%d " + "subsample %dx%d bit_depth %d tiles %dx%d.\n", + vp9->frame_width, vp9->frame_height, + vp9->subsampling_x, vp9->subsampling_y, + vp9->bit_depth, 1 << current->tile_cols_log2, + 1 << current->tile_rows_log2); + return 0; } diff --git a/libavcodec/codec_desc.c b/libavcodec/codec_desc.c index 67a30542d1b05..4850e4fb430dd 100644 --- a/libavcodec/codec_desc.c +++ b/libavcodec/codec_desc.c @@ -81,6 +81,7 @@ static const AVCodecDescriptor codec_descriptors[] = { .long_name = NULL_IF_CONFIG_SMALL("Motion JPEG"), .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, .mime_types= MT("image/jpeg"), + .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles), }, { .id = AV_CODEC_ID_MJPEGB, @@ -1077,6 +1078,7 @@ static const AVCodecDescriptor codec_descriptors[] = { .name = "prores", .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"), .props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY, + .profiles 
= NULL_IF_CONFIG_SMALL(ff_prores_profiles), }, { .id = AV_CODEC_ID_JV, @@ -1936,6 +1938,13 @@ static const AVCodecDescriptor codec_descriptors[] = { .long_name = NULL_IF_CONFIG_SMALL("PCM 24.0 floating point little-endian"), .props = AV_CODEC_PROP_LOSSLESS, }, + { + .id = AV_CODEC_ID_PCM_VIDC, + .type = AVMEDIA_TYPE_AUDIO, + .name = "pcm_vidc", + .long_name = NULL_IF_CONFIG_SMALL("PCM Archimedes VIDC"), + .props = AV_CODEC_PROP_LOSSY, + }, /* various ADPCM codecs */ { diff --git a/libavcodec/decode.c b/libavcodec/decode.c index 4607e9f318a59..c89c77c43a98d 100644 --- a/libavcodec/decode.c +++ b/libavcodec/decode.c @@ -281,10 +281,6 @@ int ff_decode_bsfs_init(AVCodecContext *avctx) bsfs_str++; } - ret = avcodec_parameters_to_context(avctx, s->bsfs[s->nb_bsfs - 1]->par_out); - if (ret < 0) - return ret; - return 0; fail: ff_decode_bsfs_uninit(avctx); @@ -1500,7 +1496,7 @@ static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame) tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h, NULL, linesize); if (tmpsize < 0) - return -1; + return tmpsize; for (i = 0; i < 3 && data[i + 1]; i++) size[i] = data[i + 1] - data[i]; diff --git a/libavcodec/h264_sei.c b/libavcodec/h264_sei.c index 43593d34d2d34..d4eb9c0dab67c 100644 --- a/libavcodec/h264_sei.c +++ b/libavcodec/h264_sei.c @@ -84,32 +84,38 @@ static int decode_picture_timing(H264SEIPictureTiming *h, GetBitContext *gb, return AVERROR_INVALIDDATA; num_clock_ts = sei_num_clock_ts_table[h->pic_struct]; - + h->timecode_cnt = 0; for (i = 0; i < num_clock_ts; i++) { - if (get_bits(gb, 1)) { /* clock_timestamp_flag */ + if (get_bits(gb, 1)) { /* clock_timestamp_flag */ + H264SEITimeCode *tc = &h->timecode[h->timecode_cnt++]; unsigned int full_timestamp_flag; - + unsigned int counting_type, cnt_dropped_flag; h->ct_type |= 1 << get_bits(gb, 2); - skip_bits(gb, 1); /* nuit_field_based_flag */ - skip_bits(gb, 5); /* counting_type */ + skip_bits(gb, 1); /* nuit_field_based_flag */ + counting_type = get_bits(gb, 5); /* 
counting_type */ full_timestamp_flag = get_bits(gb, 1); - skip_bits(gb, 1); /* discontinuity_flag */ - skip_bits(gb, 1); /* cnt_dropped_flag */ - skip_bits(gb, 8); /* n_frames */ + skip_bits(gb, 1); /* discontinuity_flag */ + cnt_dropped_flag = get_bits(gb, 1); /* cnt_dropped_flag */ + if (cnt_dropped_flag && counting_type > 1 && counting_type < 7) + tc->dropframe = 1; + tc->frame = get_bits(gb, 8); /* n_frames */ if (full_timestamp_flag) { - skip_bits(gb, 6); /* seconds_value 0..59 */ - skip_bits(gb, 6); /* minutes_value 0..59 */ - skip_bits(gb, 5); /* hours_value 0..23 */ + tc->full = 1; + tc->seconds = get_bits(gb, 6); /* seconds_value 0..59 */ + tc->minutes = get_bits(gb, 6); /* minutes_value 0..59 */ + tc->hours = get_bits(gb, 5); /* hours_value 0..23 */ } else { - if (get_bits(gb, 1)) { /* seconds_flag */ - skip_bits(gb, 6); /* seconds_value range 0..59 */ - if (get_bits(gb, 1)) { /* minutes_flag */ - skip_bits(gb, 6); /* minutes_value 0..59 */ - if (get_bits(gb, 1)) /* hours_flag */ - skip_bits(gb, 5); /* hours_value 0..23 */ + tc->seconds = tc->minutes = tc->hours = tc->full = 0; + if (get_bits(gb, 1)) { /* seconds_flag */ + tc->seconds = get_bits(gb, 6); + if (get_bits(gb, 1)) { /* minutes_flag */ + tc->minutes = get_bits(gb, 6); + if (get_bits(gb, 1)) /* hours_flag */ + tc->hours = get_bits(gb, 5); } } } + if (sps->time_offset_length > 0) skip_bits(gb, sps->time_offset_length); /* time_offset */ diff --git a/libavcodec/h264_sei.h b/libavcodec/h264_sei.h index 5b7c8ef9d8aa8..a75c3aa1753e9 100644 --- a/libavcodec/h264_sei.h +++ b/libavcodec/h264_sei.h @@ -67,6 +67,17 @@ typedef enum { H264_SEI_FPA_TYPE_2D = 6, } H264_SEI_FpaType; +typedef struct H264SEITimeCode { + /* When not continuously receiving full timecodes, we have to reference + the previous timecode received */ + int full; + int frame; + int seconds; + int minutes; + int hours; + int dropframe; +} H264SEITimeCode; + typedef struct H264SEIPictureTiming { int present; H264_SEI_PicStructType 
pic_struct; @@ -87,6 +98,16 @@ typedef struct H264SEIPictureTiming { * cpb_removal_delay in picture timing SEI message, see H.264 C.1.2 */ int cpb_removal_delay; + + /** + * Maximum three timecodes in a pic_timing SEI. + */ + H264SEITimeCode timecode[3]; + + /** + * Number of timecode in use + */ + int timecode_cnt; } H264SEIPictureTiming; typedef struct H264SEIAFD { diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c index d09cee4b13fbb..2e158745a02ba 100644 --- a/libavcodec/h264_slice.c +++ b/libavcodec/h264_slice.c @@ -1287,6 +1287,51 @@ static int h264_export_frame_props(H264Context *h) h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS; } + if (h->sei.picture_timing.timecode_cnt > 0) { + uint32_t tc = 0; + uint32_t *tc_sd; + + AVFrameSideData *tcside = av_frame_new_side_data(cur->f, + AV_FRAME_DATA_S12M_TIMECODE, + sizeof(uint32_t)*4); + if (!tcside) + return AVERROR(ENOMEM); + + tc_sd = (uint32_t*)tcside->data; + tc_sd[0] = h->sei.picture_timing.timecode_cnt; + + for (int i = 0; i < tc_sd[0]; i++) { + uint32_t frames; + + /* For SMPTE 12-M timecodes, frame count is a special case if > 30 FPS. + See SMPTE ST 12-1:2014 Sec 12.1 for more info. 
*/ + if (av_cmp_q(h->avctx->framerate, (AVRational) {30, 1}) == 1) { + frames = h->sei.picture_timing.timecode[i].frame / 2; + if (h->sei.picture_timing.timecode[i].frame % 2 == 1) { + if (av_cmp_q(h->avctx->framerate, (AVRational) {50, 1}) == 0) + tc |= (1 << 7); + else + tc |= (1 << 23); + } + } else { + frames = h->sei.picture_timing.timecode[i].frame; + } + + tc |= h->sei.picture_timing.timecode[i].dropframe << 30; + tc |= (frames / 10) << 28; + tc |= (frames % 10) << 24; + tc |= (h->sei.picture_timing.timecode[i].seconds / 10) << 20; + tc |= (h->sei.picture_timing.timecode[i].seconds % 10) << 16; + tc |= (h->sei.picture_timing.timecode[i].minutes / 10) << 12; + tc |= (h->sei.picture_timing.timecode[i].minutes % 10) << 8; + tc |= (h->sei.picture_timing.timecode[i].hours / 10) << 4; + tc |= (h->sei.picture_timing.timecode[i].hours % 10); + + tc_sd[i + 1] = tc; + } + h->sei.picture_timing.timecode_cnt = 0; + } + if (h->sei.alternative_transfer.present && av_color_transfer_name(h->sei.alternative_transfer.preferred_transfer_characteristics) && h->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) { diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c index 7b4c5c76eaab7..00d922fbe9c91 100644 --- a/libavcodec/h264dec.c +++ b/libavcodec/h264dec.c @@ -657,11 +657,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size) goto end; } if(!idr_cleared) { - if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) { - av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n"); - ret = AVERROR_INVALIDDATA; - goto end; - } idr(h); // FIXME ensure we don't lose some frames if there is reordering } idr_cleared = 1; diff --git a/libavcodec/ilbcdec.c b/libavcodec/ilbcdec.c index 8f234b98e1817..8a6dbe0b752ab 100644 --- a/libavcodec/ilbcdec.c +++ b/libavcodec/ilbcdec.c @@ -1372,7 +1372,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data, 
if (unpack_frame(s)) mode = 0; - if (s->frame.start < 1) + if (s->frame.start < 1 || s->frame.start > 5) mode = 0; if (mode) { diff --git a/libavcodec/jpeg2000dec.c b/libavcodec/jpeg2000dec.c index 96dab8e1766c1..5b07f07aa9a2a 100644 --- a/libavcodec/jpeg2000dec.c +++ b/libavcodec/jpeg2000dec.c @@ -1162,7 +1162,7 @@ static int jpeg2000_decode_packets_po_iteration(Jpeg2000DecoderContext *s, Jpeg2 step_x = 32; step_y = 32; - if (RSpoc > FFMIN(codsty->nreslevels, REpoc)) + if (RSpoc >= FFMIN(codsty->nreslevels, REpoc)) continue; for (reslevelno = RSpoc; reslevelno < FFMIN(codsty->nreslevels, REpoc); reslevelno++) { diff --git a/libavcodec/libaomenc.c b/libavcodec/libaomenc.c index 045c519f7236e..c5458766cbfb6 100644 --- a/libavcodec/libaomenc.c +++ b/libavcodec/libaomenc.c @@ -34,6 +34,7 @@ #include "libavutil/opt.h" #include "libavutil/pixdesc.h" +#include "av1.h" #include "avcodec.h" #include "internal.h" #include "profiles.h" @@ -74,6 +75,10 @@ typedef struct AOMEncoderContext { uint64_t sse[4]; int have_sse; /**< true if we have pending sse[] */ uint64_t frame_number; + int tile_cols, tile_rows; + int tile_cols_log2, tile_rows_log2; + aom_superblock_size_t superblock_size; + int uniform_tiles; } AOMContext; static const char *const ctlidstr[] = { @@ -85,6 +90,9 @@ static const char *const ctlidstr[] = { [AV1E_SET_COLOR_PRIMARIES] = "AV1E_SET_COLOR_PRIMARIES", [AV1E_SET_MATRIX_COEFFICIENTS] = "AV1E_SET_MATRIX_COEFFICIENTS", [AV1E_SET_TRANSFER_CHARACTERISTICS] = "AV1E_SET_TRANSFER_CHARACTERISTICS", + [AV1E_SET_SUPERBLOCK_SIZE] = "AV1E_SET_SUPERBLOCK_SIZE", + [AV1E_SET_TILE_COLUMNS] = "AV1E_SET_TILE_COLUMNS", + [AV1E_SET_TILE_ROWS] = "AV1E_SET_TILE_ROWS", }; static av_cold void log_encoder_error(AVCodecContext *avctx, const char *desc) @@ -149,6 +157,10 @@ static av_cold void dump_enc_cfg(AVCodecContext *avctx, width, "kf_mode:", cfg->kf_mode, width, "kf_min_dist:", cfg->kf_min_dist, width, "kf_max_dist:", cfg->kf_max_dist); + av_log(avctx, level, "tile settings\n" + 
" %*s%d\n %*s%d\n", + width, "tile_width_count:", cfg->tile_width_count, + width, "tile_height_count:", cfg->tile_height_count); av_log(avctx, level, "\n"); } @@ -290,6 +302,169 @@ static void set_color_range(AVCodecContext *avctx) codecctl_int(avctx, AV1E_SET_COLOR_RANGE, aom_cr); } +static int count_uniform_tiling(int dim, int sb_size, int tiles_log2) +{ + int sb_dim = (dim + sb_size - 1) / sb_size; + int tile_dim = (sb_dim + (1 << tiles_log2) - 1) >> tiles_log2; + av_assert0(tile_dim > 0); + return (sb_dim + tile_dim - 1) / tile_dim; +} + +static int choose_tiling(AVCodecContext *avctx, + struct aom_codec_enc_cfg *enccfg) +{ + AOMContext *ctx = avctx->priv_data; + int sb_128x128_possible, sb_size, sb_width, sb_height; + int uniform_rows, uniform_cols; + int uniform_64x64_possible, uniform_128x128_possible; + int tile_size, rounding, i; + + if (ctx->tile_cols_log2 >= 0) + ctx->tile_cols = 1 << ctx->tile_cols_log2; + if (ctx->tile_rows_log2 >= 0) + ctx->tile_rows = 1 << ctx->tile_rows_log2; + + if (ctx->tile_cols == 0) { + ctx->tile_cols = (avctx->width + AV1_MAX_TILE_WIDTH - 1) / + AV1_MAX_TILE_WIDTH; + if (ctx->tile_cols > 1) { + av_log(avctx, AV_LOG_DEBUG, "Automatically using %d tile " + "columns to fill width.\n", ctx->tile_cols); + } + } + av_assert0(ctx->tile_cols > 0); + if (ctx->tile_rows == 0) { + int max_tile_width = + FFALIGN((FFALIGN(avctx->width, 128) + + ctx->tile_cols - 1) / ctx->tile_cols, 128); + ctx->tile_rows = + (max_tile_width * FFALIGN(avctx->height, 128) + + AV1_MAX_TILE_AREA - 1) / AV1_MAX_TILE_AREA; + if (ctx->tile_rows > 1) { + av_log(avctx, AV_LOG_DEBUG, "Automatically using %d tile " + "rows to fill area.\n", ctx->tile_rows); + } + } + av_assert0(ctx->tile_rows > 0); + + if ((avctx->width + 63) / 64 < ctx->tile_cols || + (avctx->height + 63) / 64 < ctx->tile_rows) { + av_log(avctx, AV_LOG_ERROR, "Invalid tile sizing: frame not " + "large enough to fit specified tile arrangement.\n"); + return AVERROR(EINVAL); + } + if (ctx->tile_cols > 
AV1_MAX_TILE_COLS || + ctx->tile_rows > AV1_MAX_TILE_ROWS) { + av_log(avctx, AV_LOG_ERROR, "Invalid tile sizing: AV1 does " + "not allow more than %dx%d tiles.\n", + AV1_MAX_TILE_COLS, AV1_MAX_TILE_ROWS); + return AVERROR(EINVAL); + } + if (avctx->width / ctx->tile_cols > AV1_MAX_TILE_WIDTH) { + av_log(avctx, AV_LOG_ERROR, "Invalid tile sizing: AV1 does " + "not allow tiles of width greater than %d.\n", + AV1_MAX_TILE_WIDTH); + return AVERROR(EINVAL); + } + + ctx->superblock_size = AOM_SUPERBLOCK_SIZE_DYNAMIC; + + if (ctx->tile_cols == 1 && ctx->tile_rows == 1) { + av_log(avctx, AV_LOG_DEBUG, "Using a single tile.\n"); + return 0; + } + + sb_128x128_possible = + (avctx->width + 127) / 128 >= ctx->tile_cols && + (avctx->height + 127) / 128 >= ctx->tile_rows; + + ctx->tile_cols_log2 = ctx->tile_cols == 1 ? 0 : + av_log2(ctx->tile_cols - 1) + 1; + ctx->tile_rows_log2 = ctx->tile_rows == 1 ? 0 : + av_log2(ctx->tile_rows - 1) + 1; + + uniform_cols = count_uniform_tiling(avctx->width, + 64, ctx->tile_cols_log2); + uniform_rows = count_uniform_tiling(avctx->height, + 64, ctx->tile_rows_log2); + av_log(avctx, AV_LOG_DEBUG, "Uniform with 64x64 superblocks " + "-> %dx%d tiles.\n", uniform_cols, uniform_rows); + uniform_64x64_possible = uniform_cols == ctx->tile_cols && + uniform_rows == ctx->tile_rows; + + if (sb_128x128_possible) { + uniform_cols = count_uniform_tiling(avctx->width, + 128, ctx->tile_cols_log2); + uniform_rows = count_uniform_tiling(avctx->height, + 128, ctx->tile_rows_log2); + av_log(avctx, AV_LOG_DEBUG, "Uniform with 128x128 superblocks " + "-> %dx%d tiles.\n", uniform_cols, uniform_rows); + uniform_128x128_possible = uniform_cols == ctx->tile_cols && + uniform_rows == ctx->tile_rows; + } else { + av_log(avctx, AV_LOG_DEBUG, "128x128 superblocks not possible.\n"); + uniform_128x128_possible = 0; + } + + ctx->uniform_tiles = 1; + if (uniform_64x64_possible && uniform_128x128_possible) { + av_log(avctx, AV_LOG_DEBUG, "Using uniform tiling with dynamic " + 
"superblocks (tile_cols_log2 = %d, tile_rows_log2 = %d).\n", + ctx->tile_cols_log2, ctx->tile_rows_log2); + return 0; + } + if (uniform_64x64_possible && !sb_128x128_possible) { + av_log(avctx, AV_LOG_DEBUG, "Using uniform tiling with 64x64 " + "superblocks (tile_cols_log2 = %d, tile_rows_log2 = %d).\n", + ctx->tile_cols_log2, ctx->tile_rows_log2); + ctx->superblock_size = AOM_SUPERBLOCK_SIZE_64X64; + return 0; + } + if (uniform_128x128_possible) { + av_log(avctx, AV_LOG_DEBUG, "Using uniform tiling with 128x128 " + "superblocks (tile_cols_log2 = %d, tile_rows_log2 = %d).\n", + ctx->tile_cols_log2, ctx->tile_rows_log2); + ctx->superblock_size = AOM_SUPERBLOCK_SIZE_128X128; + return 0; + } + ctx->uniform_tiles = 0; + + if (sb_128x128_possible) { + sb_size = 128; + ctx->superblock_size = AOM_SUPERBLOCK_SIZE_128X128; + } else { + sb_size = 64; + ctx->superblock_size = AOM_SUPERBLOCK_SIZE_64X64; + } + av_log(avctx, AV_LOG_DEBUG, "Using fixed tiling with %dx%d " + "superblocks (tile_cols = %d, tile_rows = %d).\n", + sb_size, sb_size, ctx->tile_cols, ctx->tile_rows); + + enccfg->tile_width_count = ctx->tile_cols; + enccfg->tile_height_count = ctx->tile_rows; + + sb_width = (avctx->width + sb_size - 1) / sb_size; + sb_height = (avctx->height + sb_size - 1) / sb_size; + + tile_size = sb_width / ctx->tile_cols; + rounding = sb_width % ctx->tile_cols; + for (i = 0; i < ctx->tile_cols; i++) { + enccfg->tile_widths[i] = tile_size + + (i < rounding / 2 || + i > ctx->tile_cols - 1 - (rounding + 1) / 2); + } + + tile_size = sb_height / ctx->tile_rows; + rounding = sb_height % ctx->tile_rows; + for (i = 0; i < ctx->tile_rows; i++) { + enccfg->tile_heights[i] = tile_size + + (i < rounding / 2 || + i > ctx->tile_rows - 1 - (rounding + 1) / 2); + } + + return 0; +} + static av_cold int aom_init(AVCodecContext *avctx, const struct aom_codec_iface *iface) { @@ -442,6 +617,10 @@ static av_cold int aom_init(AVCodecContext *avctx, enccfg.g_error_resilient = ctx->error_resilient; + res = 
choose_tiling(avctx, &enccfg); + if (res < 0) + return res; + dump_enc_cfg(avctx, &enccfg); /* Construct Encoder Context */ res = aom_codec_enc_init(&ctx->encoder, iface, &enccfg, flags); @@ -465,6 +644,12 @@ static av_cold int aom_init(AVCodecContext *avctx, codecctl_int(avctx, AV1E_SET_TRANSFER_CHARACTERISTICS, avctx->color_trc); set_color_range(avctx); + codecctl_int(avctx, AV1E_SET_SUPERBLOCK_SIZE, ctx->superblock_size); + if (ctx->uniform_tiles) { + codecctl_int(avctx, AV1E_SET_TILE_COLUMNS, ctx->tile_cols_log2); + codecctl_int(avctx, AV1E_SET_TILE_ROWS, ctx->tile_rows_log2); + } + // provide dummy value to initialize wrapper, values will be updated each _encode() aom_img_wrap(&ctx->rawimg, img_fmt, avctx->width, avctx->height, 1, (unsigned char*)1); @@ -796,6 +981,9 @@ static const AVOption options[] = { { "static-thresh", "A change threshold on blocks below which they will be skipped by the encoder", OFFSET(static_thresh), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE }, { "drop-threshold", "Frame drop threshold", offsetof(AOMContext, drop_threshold), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, VE }, { "noise-sensitivity", "Noise sensitivity", OFFSET(noise_sensitivity), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 4, VE}, + { "tiles", "Tile columns x rows", OFFSET(tile_cols), AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL }, 0, 0, VE }, + { "tile-columns", "Log2 of number of tile columns to use", OFFSET(tile_cols_log2), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 6, VE}, + { "tile-rows", "Log2 of number of tile rows to use", OFFSET(tile_rows_log2), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 6, VE}, { NULL } }; diff --git a/libavcodec/libdav1d.c b/libavcodec/libdav1d.c new file mode 100644 index 0000000000000..873adfda40379 --- /dev/null +++ b/libavcodec/libdav1d.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2018 Ronald S. Bultje + * Copyright (c) 2018 James Almer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include "libavutil/avassert.h" +#include "libavutil/fifo.h" +#include "libavutil/opt.h" + +#include "avcodec.h" +#include "decode.h" +#include "internal.h" + +typedef struct Libdav1dContext { + AVClass *class; + Dav1dContext *c; + + AVFifoBuffer *cache; + Dav1dData data; + int tile_threads; +} Libdav1dContext; + +static av_cold int libdav1d_init(AVCodecContext *c) +{ + Libdav1dContext *dav1d = c->priv_data; + Dav1dSettings s; + int res; + + av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version()); + + dav1d_default_settings(&s); + s.n_tile_threads = dav1d->tile_threads; + s.n_frame_threads = FFMIN(c->thread_count ? 
c->thread_count : av_cpu_count(), 256); + + dav1d->cache = av_fifo_alloc(8 * sizeof(AVPacket)); + if (!dav1d->cache) + return AVERROR(ENOMEM); + + res = dav1d_open(&dav1d->c, &s); + if (res < 0) + return AVERROR(ENOMEM); + + return 0; +} + +static void libdav1d_flush(AVCodecContext *c) +{ + Libdav1dContext *dav1d = c->priv_data; + + av_fifo_reset(dav1d->cache); + dav1d_data_unref(&dav1d->data); + dav1d_flush(dav1d->c); +} + +static int libdav1d_fifo_write(void *src, void *dst, int dst_size) { + AVPacket *pkt_dst = dst, *pkt_src = src; + + av_assert2(dst_size >= sizeof(AVPacket)); + + pkt_src->buf = NULL; + av_packet_free_side_data(pkt_src); + *pkt_dst = *pkt_src; + + return sizeof(AVPacket); +} + +static void libdav1d_data_free(const uint8_t *data, void *opaque) { + AVBufferRef *buf = opaque; + + av_buffer_unref(&buf); +} + +static void libdav1d_frame_free(void *opaque, uint8_t *data) { + Dav1dPicture p = { 0 }; + + p.ref = opaque; + p.data[0] = (void *) 0x1; // this has to be non-NULL + dav1d_picture_unref(&p); +} + +static const enum AVPixelFormat pix_fmt[][2] = { + [DAV1D_PIXEL_LAYOUT_I400] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10 }, + [DAV1D_PIXEL_LAYOUT_I420] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10 }, + [DAV1D_PIXEL_LAYOUT_I422] = { AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10 }, + [DAV1D_PIXEL_LAYOUT_I444] = { AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10 }, +}; + +// TODO: Update once 12bit support is added. 
+static const int profile[] = { + [DAV1D_PIXEL_LAYOUT_I400] = FF_PROFILE_AV1_MAIN, + [DAV1D_PIXEL_LAYOUT_I420] = FF_PROFILE_AV1_MAIN, + [DAV1D_PIXEL_LAYOUT_I422] = FF_PROFILE_AV1_PROFESSIONAL, + [DAV1D_PIXEL_LAYOUT_I444] = FF_PROFILE_AV1_HIGH, +}; + +static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame) +{ + Libdav1dContext *dav1d = c->priv_data; + Dav1dData *data = &dav1d->data; + AVPacket pkt = { 0 }; + Dav1dPicture p = { 0 }; + int res; + + if (!data->sz) { + res = ff_decode_get_packet(c, &pkt); + if (res < 0 && res != AVERROR_EOF) + return res; + + if (pkt.size) { + if (!av_fifo_space(dav1d->cache)) { + res = av_fifo_grow(dav1d->cache, 8 * sizeof(pkt)); + if (res < 0) { + av_packet_unref(&pkt); + return res; + } + } + + res = dav1d_data_wrap(data, pkt.data, pkt.size, libdav1d_data_free, pkt.buf); + if (res < 0) { + av_packet_unref(&pkt); + return res; + } + + av_fifo_generic_write(dav1d->cache, &pkt, sizeof(pkt), libdav1d_fifo_write); + } else { + data = NULL; + } + } + + res = dav1d_decode(dav1d->c, data, &p); + if (res < 0) { + if (res == -EINVAL) + res = AVERROR_INVALIDDATA; + else if (res == -EAGAIN && c->internal->draining) + res = AVERROR_EOF; + + return res; + } + + av_assert0(p.data[0] != NULL); + + av_fifo_generic_read(dav1d->cache, &pkt, sizeof(pkt), NULL); + + frame->buf[0] = av_buffer_create(NULL, 0, libdav1d_frame_free, + p.ref, AV_BUFFER_FLAG_READONLY); + if (!frame->buf[0]) { + dav1d_picture_unref(&p); + return AVERROR(ENOMEM); + } + + frame->data[0] = p.data[0]; + frame->data[1] = p.data[1]; + frame->data[2] = p.data[2]; + frame->linesize[0] = p.stride[0]; + frame->linesize[1] = p.stride[1]; + frame->linesize[2] = p.stride[1]; + + c->profile = profile[p.p.layout]; + frame->format = c->pix_fmt = pix_fmt[p.p.layout][p.p.bpc == 10]; + frame->width = p.p.w; + frame->height = p.p.h; + if (c->width != p.p.w || c->height != p.p.h) { + res = ff_set_dimensions(c, p.p.w, p.p.h); + if (res < 0) + return res; + } + + switch (p.p.chr) { + case 
DAV1D_CHR_VERTICAL: + frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_LEFT; + break; + case DAV1D_CHR_COLOCATED: + frame->chroma_location = c->chroma_sample_location = AVCHROMA_LOC_TOPLEFT; + break; + } + frame->colorspace = c->colorspace = (enum AVColorSpace) p.p.mtrx; + frame->color_primaries = c->color_primaries = (enum AVColorPrimaries) p.p.pri; + frame->color_trc = c->color_trc = (enum AVColorTransferCharacteristic) p.p.trc; + frame->color_range = c->color_range = p.p.fullrange ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; + + // match timestamps and packet size + frame->pts = frame->best_effort_timestamp = pkt.pts; +#if FF_API_PKT_PTS +FF_DISABLE_DEPRECATION_WARNINGS + frame->pkt_pts = pkt.pts; +FF_ENABLE_DEPRECATION_WARNINGS +#endif + frame->pkt_dts = pkt.dts; + frame->pkt_pos = pkt.pos; + frame->pkt_size = pkt.size; + frame->pkt_duration = pkt.duration; + frame->key_frame = p.p.type == DAV1D_FRAME_TYPE_KEY; + + switch (p.p.type) { + case DAV1D_FRAME_TYPE_KEY: + case DAV1D_FRAME_TYPE_INTRA: + frame->pict_type = AV_PICTURE_TYPE_I; + break; + case DAV1D_FRAME_TYPE_INTER: + frame->pict_type = AV_PICTURE_TYPE_P; + break; + case DAV1D_FRAME_TYPE_SWITCH: + frame->pict_type = AV_PICTURE_TYPE_SP; + break; + default: + return AVERROR_INVALIDDATA; + } + + return 0; +} + +static av_cold int libdav1d_close(AVCodecContext *c) +{ + Libdav1dContext *dav1d = c->priv_data; + + av_fifo_freep(&dav1d->cache); + dav1d_data_unref(&dav1d->data); + dav1d_close(&dav1d->c); + + return 0; +} + +#define OFFSET(x) offsetof(Libdav1dContext, x) +#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM +static const AVOption libdav1d_options[] = { + { "tilethreads", "Tile threads", OFFSET(tile_threads), AV_OPT_TYPE_INT, { .i64 = 1 }, 1, 64, VD, NULL }, + { NULL } +}; + +static const AVClass libdav1d_class = { + .class_name = "libdav1d decoder", + .item_name = av_default_item_name, + .option = libdav1d_options, + .version = LIBAVUTIL_VERSION_INT, +}; + +AVCodec 
ff_libdav1d_decoder = { + .name = "libdav1d", + .long_name = NULL_IF_CONFIG_SMALL("dav1d AV1 decoder by VideoLAN"), + .type = AVMEDIA_TYPE_VIDEO, + .id = AV_CODEC_ID_AV1, + .priv_data_size = sizeof(Libdav1dContext), + .init = libdav1d_init, + .close = libdav1d_close, + .flush = libdav1d_flush, + .receive_frame = libdav1d_receive_frame, + .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS, + .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP | + FF_CODEC_CAP_SETS_PKT_DTS, + .priv_class = &libdav1d_class, + .wrapper_name = "libdav1d", +}; diff --git a/libavcodec/libdavs2.c b/libavcodec/libdavs2.c index aa1478290abfd..cadf9954444b5 100644 --- a/libavcodec/libdavs2.c +++ b/libavcodec/libdavs2.c @@ -165,7 +165,7 @@ AVCodec ff_libdavs2_decoder = { .close = davs2_end, .decode = davs2_decode_frame, .capabilities = AV_CODEC_CAP_DELAY,//AV_CODEC_CAP_DR1 | - .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, + .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE }, .wrapper_name = "libdavs2", }; diff --git a/libavcodec/libopenh264enc.c b/libavcodec/libopenh264enc.c index 83c3f0ce208cb..5baa423433dac 100644 --- a/libavcodec/libopenh264enc.c +++ b/libavcodec/libopenh264enc.c @@ -164,6 +164,47 @@ FF_ENABLE_DEPRECATION_WARNINGS param.sSpatialLayers[0].iSpatialBitrate = param.iTargetBitrate; param.sSpatialLayers[0].iMaxSpatialBitrate = param.iMaxBitrate; +#if OPENH264_VER_AT_LEAST(1, 7) + if (avctx->sample_aspect_ratio.num && avctx->sample_aspect_ratio.den) { + // Table E-1. + static const AVRational sar_idc[] = { + { 0, 0 }, // Unspecified (never written here). 
+ { 1, 1 }, { 12, 11 }, { 10, 11 }, { 16, 11 }, + { 40, 33 }, { 24, 11 }, { 20, 11 }, { 32, 11 }, + { 80, 33 }, { 18, 11 }, { 15, 11 }, { 64, 33 }, + { 160, 99 }, // Last 3 are unknown to openh264: { 4, 3 }, { 3, 2 }, { 2, 1 }, + }; + static const ESampleAspectRatio asp_idc[] = { + ASP_UNSPECIFIED, + ASP_1x1, ASP_12x11, ASP_10x11, ASP_16x11, + ASP_40x33, ASP_24x11, ASP_20x11, ASP_32x11, + ASP_80x33, ASP_18x11, ASP_15x11, ASP_64x33, + ASP_160x99, + }; + int num, den, i; + + av_reduce(&num, &den, avctx->sample_aspect_ratio.num, + avctx->sample_aspect_ratio.den, 65535); + + for (i = 1; i < FF_ARRAY_ELEMS(sar_idc); i++) { + if (num == sar_idc[i].num && + den == sar_idc[i].den) + break; + } + if (i == FF_ARRAY_ELEMS(sar_idc)) { + param.sSpatialLayers[0].eAspectRatio = ASP_EXT_SAR; + param.sSpatialLayers[0].sAspectRatioExtWidth = num; + param.sSpatialLayers[0].sAspectRatioExtHeight = den; + } else { + param.sSpatialLayers[0].eAspectRatio = asp_idc[i]; + } + param.sSpatialLayers[0].bAspectRatioPresent = true; + } + else { + param.sSpatialLayers[0].bAspectRatioPresent = false; + } +#endif + if ((avctx->slices > 1) && (s->max_nal_size)) { av_log(avctx, AV_LOG_ERROR, "Invalid combination -slices %d and -max_nal_size %d.\n", diff --git a/libavcodec/libvpxdec.c b/libavcodec/libvpxdec.c index 04f27d3396107..164dbda49b08d 100644 --- a/libavcodec/libvpxdec.c +++ b/libavcodec/libvpxdec.c @@ -47,8 +47,7 @@ static av_cold int vpx_init(AVCodecContext *avctx, { VPxContext *ctx = avctx->priv_data; struct vpx_codec_dec_cfg deccfg = { - /* token partitions+1 would be a decent choice */ - .threads = FFMIN(avctx->thread_count, 16) + .threads = FFMIN(avctx->thread_count ? 
avctx->thread_count : av_cpu_count(), 16) }; av_log(avctx, AV_LOG_INFO, "%s\n", vpx_codec_version_str()); diff --git a/libavcodec/libvpxenc.c b/libavcodec/libvpxenc.c index 09f7a88452873..ad440a9c21f3c 100644 --- a/libavcodec/libvpxenc.c +++ b/libavcodec/libvpxenc.c @@ -111,6 +111,7 @@ typedef struct VPxEncoderContext { int row_mt; int tune_content; int corpus_complexity; + int tpl_model; } VPxContext; /** String mappings for enum vp8e_enc_control_id */ @@ -146,6 +147,9 @@ static const char *const ctlidstr[] = { #ifdef VPX_CTRL_VP9E_SET_TUNE_CONTENT [VP9E_SET_TUNE_CONTENT] = "VP9E_SET_TUNE_CONTENT", #endif +#ifdef VPX_CTRL_VP9E_SET_TPL + [VP9E_SET_TPL] = "VP9E_SET_TPL", +#endif #endif }; @@ -716,6 +720,10 @@ FF_ENABLE_DEPRECATION_WARNINGS #ifdef VPX_CTRL_VP9E_SET_TUNE_CONTENT if (ctx->tune_content >= 0) codecctl_int(avctx, VP9E_SET_TUNE_CONTENT, ctx->tune_content); +#endif +#ifdef VPX_CTRL_VP9E_SET_TPL + if (ctx->tpl_model >= 0) + codecctl_int(avctx, VP9E_SET_TPL, ctx->tpl_model); #endif } #endif @@ -1067,8 +1075,6 @@ static int vpx_encode(AVCodecContext *avctx, AVPacket *pkt, #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM #define COMMON_OPTIONS \ - { "auto-alt-ref", "Enable use of alternate reference " \ - "frames (2-pass only)", OFFSET(auto_alt_ref), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 2, VE}, \ { "lag-in-frames", "Number of frames to look ahead for " \ "alternate reference frame selection", OFFSET(lag_in_frames), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE}, \ { "arnr-maxframes", "altref noise reduction max frame count", OFFSET(arnr_max_frames), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, VE}, \ @@ -1112,6 +1118,8 @@ static int vpx_encode(AVCodecContext *avctx, AVPacket *pkt, #if CONFIG_LIBVPX_VP8_ENCODER static const AVOption vp8_options[] = { COMMON_OPTIONS + { "auto-alt-ref", "Enable use of alternate reference " + "frames (2-pass only)", OFFSET(auto_alt_ref), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 2, VE}, { "cpu-used", "Quality/Speed ratio 
modifier", OFFSET(cpu_used), AV_OPT_TYPE_INT, {.i64 = 1}, -16, 16, VE}, LEGACY_OPTIONS { NULL } @@ -1121,6 +1129,8 @@ static const AVOption vp8_options[] = { #if CONFIG_LIBVPX_VP9_ENCODER static const AVOption vp9_options[] = { COMMON_OPTIONS + { "auto-alt-ref", "Enable use of alternate reference " + "frames (2-pass only)", OFFSET(auto_alt_ref), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 6, VE}, { "cpu-used", "Quality/Speed ratio modifier", OFFSET(cpu_used), AV_OPT_TYPE_INT, {.i64 = 1}, -8, 8, VE}, { "lossless", "Lossless mode", OFFSET(lossless), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, VE}, { "tile-columns", "Number of tile columns to use, log2", OFFSET(tile_columns), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 6, VE}, @@ -1156,6 +1166,9 @@ static const AVOption vp9_options[] = { #endif #if VPX_ENCODER_ABI_VERSION >= 14 { "corpus-complexity", "corpus vbr complexity midpoint", OFFSET(corpus_complexity), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 10000, VE }, +#endif +#ifdef VPX_CTRL_VP9E_SET_TPL + { "enable-tpl", "Enable temporal dependency model", OFFSET(tpl_model), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE }, #endif LEGACY_OPTIONS { NULL } diff --git a/libavcodec/libx264.c b/libavcodec/libx264.c index 54e6703d739b6..a68d0a7f61ad6 100644 --- a/libavcodec/libx264.c +++ b/libavcodec/libx264.c @@ -92,6 +92,9 @@ typedef struct X264Context { int noise_reduction; char *x264_params; + + int nb_reordered_opaque, next_reordered_opaque; + int64_t *reordered_opaque; } X264Context; static void X264_log(void *p, int level, const char *fmt, va_list args) @@ -278,6 +281,7 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame, int nnal, i, ret; x264_picture_t pic_out = {0}; int pict_type; + int64_t *out_opaque; x264_picture_init( &x4->pic ); x4->pic.img.i_csp = x4->params.i_csp; @@ -297,6 +301,11 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame, x4->pic.i_pts = frame->pts; + x4->reordered_opaque[x4->next_reordered_opaque] = frame->reordered_opaque; + 
x4->pic.opaque = &x4->reordered_opaque[x4->next_reordered_opaque]; + x4->next_reordered_opaque++; + x4->next_reordered_opaque %= x4->nb_reordered_opaque; + switch (frame->pict_type) { case AV_PICTURE_TYPE_I: x4->pic.i_type = x4->forced_idr > 0 ? X264_TYPE_IDR @@ -350,6 +359,14 @@ static int X264_frame(AVCodecContext *ctx, AVPacket *pkt, const AVFrame *frame, pkt->pts = pic_out.i_pts; pkt->dts = pic_out.i_dts; + out_opaque = pic_out.opaque; + if (out_opaque >= x4->reordered_opaque && + out_opaque < &x4->reordered_opaque[x4->nb_reordered_opaque]) { + ctx->reordered_opaque = *out_opaque; + } else { + // Unexpected opaque pointer on picture output + ctx->reordered_opaque = 0; + } switch (pic_out.i_type) { case X264_TYPE_IDR: @@ -393,6 +410,7 @@ static av_cold int X264_close(AVCodecContext *avctx) av_freep(&avctx->extradata); av_freep(&x4->sei); + av_freep(&x4->reordered_opaque); if (x4->enc) { x264_encoder_close(x4->enc); @@ -846,6 +864,14 @@ FF_ENABLE_DEPRECATION_WARNINGS cpb_props->max_bitrate = x4->params.rc.i_vbv_max_bitrate * 1000; cpb_props->avg_bitrate = x4->params.rc.i_bitrate * 1000; + // Overestimate the reordered opaque buffer size, in case a runtime + // reconfigure would increase the delay (which it shouldn't). 
+ x4->nb_reordered_opaque = x264_encoder_maximum_delayed_frames(x4->enc) + 17; + x4->reordered_opaque = av_malloc_array(x4->nb_reordered_opaque, + sizeof(*x4->reordered_opaque)); + if (!x4->reordered_opaque) + return AVERROR(ENOMEM); + return 0; } @@ -1059,12 +1085,12 @@ AVCodec ff_libx264_encoder = { .init = X264_init, .encode2 = X264_frame, .close = X264_close, - .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS, + .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS | + AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, .priv_class = &x264_class, .defaults = x264_defaults, .init_static_data = X264_init_static, - .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | - FF_CODEC_CAP_INIT_CLEANUP, + .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, .wrapper_name = "libx264", }; #endif @@ -1086,7 +1112,8 @@ AVCodec ff_libx264rgb_encoder = { .init = X264_init, .encode2 = X264_frame, .close = X264_close, - .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS, + .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS | + AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, .priv_class = &rgbclass, .defaults = x264_defaults, .pix_fmts = pix_fmts_8bit_rgb, @@ -1111,12 +1138,12 @@ AVCodec ff_libx262_encoder = { .init = X264_init, .encode2 = X264_frame, .close = X264_close, - .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS, + .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS | + AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE, .priv_class = &X262_class, .defaults = x264_defaults, .pix_fmts = pix_fmts_8bit, - .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | - FF_CODEC_CAP_INIT_CLEANUP, + .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, .wrapper_name = "libx264", }; #endif diff --git a/libavcodec/libxavs2.c b/libavcodec/libxavs2.c index 2b47d0c5d28e8..1df4148b6fe98 100644 --- a/libavcodec/libxavs2.c +++ b/libavcodec/libxavs2.c @@ -78,20 +78,20 @@ static av_cold int xavs2_init(AVCodecContext *avctx) return AVERROR(ENOMEM); } - xavs2_opt_set2("width", "%d", 
avctx->width); - xavs2_opt_set2("height", "%d", avctx->height); - xavs2_opt_set2("bframes", "%d", avctx->max_b_frames); - xavs2_opt_set2("bitdepth", "%d", bit_depth); - xavs2_opt_set2("log", "%d", cae->log_level); - xavs2_opt_set2("preset", "%d", cae->preset_level); + xavs2_opt_set2("Width", "%d", avctx->width); + xavs2_opt_set2("Height", "%d", avctx->height); + xavs2_opt_set2("BFrames", "%d", avctx->max_b_frames); + xavs2_opt_set2("BitDepth", "%d", bit_depth); + xavs2_opt_set2("Log", "%d", cae->log_level); + xavs2_opt_set2("Preset", "%d", cae->preset_level); - /* not the same parameter as the IntraPeriod in xavs2 log */ - xavs2_opt_set2("intraperiod", "%d", avctx->gop_size); + xavs2_opt_set2("IntraPeriodMax", "%d", avctx->gop_size); + xavs2_opt_set2("IntraPeriodMin", "%d", avctx->gop_size); - xavs2_opt_set2("thread_frames", "%d", avctx->thread_count); - xavs2_opt_set2("thread_rows", "%d", cae->lcu_row_threads); + xavs2_opt_set2("ThreadFrames", "%d", avctx->thread_count); + xavs2_opt_set2("ThreadRows", "%d", cae->lcu_row_threads); - xavs2_opt_set2("OpenGOP", "%d", 1); + xavs2_opt_set2("OpenGOP", "%d", !(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)); if (cae->xavs2_opts) { AVDictionary *dict = NULL; @@ -109,11 +109,11 @@ static av_cold int xavs2_init(AVCodecContext *avctx) if (avctx->bit_rate > 0) { xavs2_opt_set2("RateControl", "%d", 1); xavs2_opt_set2("TargetBitRate", "%"PRId64"", avctx->bit_rate); - xavs2_opt_set2("initial_qp", "%d", cae->initial_qp); - xavs2_opt_set2("max_qp", "%d", cae->max_qp); - xavs2_opt_set2("min_qp", "%d", cae->min_qp); + xavs2_opt_set2("InitialQP", "%d", cae->initial_qp); + xavs2_opt_set2("MaxQP", "%d", cae->max_qp); + xavs2_opt_set2("MinQP", "%d", cae->min_qp); } else { - xavs2_opt_set2("initial_qp", "%d", cae->qp); + xavs2_opt_set2("InitialQP", "%d", cae->qp); } @@ -286,7 +286,8 @@ AVCodec ff_libxavs2_encoder = { .encode2 = xavs2_encode_frame, .close = xavs2_close, .capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AUTO_THREADS, - .pix_fmts 
= (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_NONE }, + .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, + AV_PIX_FMT_NONE }, .priv_class = &libxavs2, .defaults = xavs2_defaults, .wrapper_name = "libxavs2", diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c index b0cb3ffc83925..96c425515a9f1 100644 --- a/libavcodec/mjpegdec.c +++ b/libavcodec/mjpegdec.c @@ -43,6 +43,7 @@ #include "mjpeg.h" #include "mjpegdec.h" #include "jpeglsdec.h" +#include "profiles.h" #include "put_bits.h" #include "tiff.h" #include "exif.h" @@ -2796,6 +2797,7 @@ AVCodec ff_mjpeg_decoder = { .capabilities = AV_CODEC_CAP_DR1, .max_lowres = 3, .priv_class = &mjpegdec_class, + .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles), .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM, .hw_configs = (const AVCodecHWConfigInternal*[]) { diff --git a/libavcodec/mjpegenc.c b/libavcodec/mjpegenc.c index d2fcb8e19199a..0ea7bd3d1030e 100644 --- a/libavcodec/mjpegenc.c +++ b/libavcodec/mjpegenc.c @@ -38,6 +38,7 @@ #include "mpegvideo.h" #include "mjpeg.h" #include "mjpegenc.h" +#include "profiles.h" static int alloc_huffman(MpegEncContext *s) { @@ -418,6 +419,7 @@ AVCodec ff_mjpeg_encoder = { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE }, .priv_class = &mjpeg_class, + .profiles = NULL_IF_CONFIG_SMALL(ff_mjpeg_profiles), }; #endif diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c index f435a520c8b42..f44ee76bd4194 100644 --- a/libavcodec/mpeg4videodec.c +++ b/libavcodec/mpeg4videodec.c @@ -402,7 +402,7 @@ static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *g llabs(sprite_offset[0][i] + sprite_delta[i][1] * (h+16LL)) >= INT_MAX || llabs(sprite_offset[0][i] + sprite_delta[i][0] * (w+16LL) + sprite_delta[i][1] * (h+16LL)) >= INT_MAX || llabs(sprite_delta[i][0] * (w+16LL)) >= INT_MAX || - llabs(sprite_delta[i][1] * (w+16LL)) >= INT_MAX || + 
llabs(sprite_delta[i][1] * (h+16LL)) >= INT_MAX || llabs(sd[0]) >= INT_MAX || llabs(sd[1]) >= INT_MAX || llabs(sprite_offset[0][i] + sd[0] * (w+16LL)) >= INT_MAX || @@ -3056,6 +3056,7 @@ static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb) if (get_bits_left(gb) <= 32) return 0; + s->partitioned_frame = 0; s->decode_mb = mpeg4_decode_studio_mb; decode_smpte_tc(ctx, gb); diff --git a/libavcodec/mpegaudio_parser.c b/libavcodec/mpegaudio_parser.c index a109f12701b14..1005e89aaeabd 100644 --- a/libavcodec/mpegaudio_parser.c +++ b/libavcodec/mpegaudio_parser.c @@ -101,7 +101,7 @@ static int mpegaudio_parse(AVCodecParserContext *s1, "MP3ADU full parser"); *poutbuf = NULL; *poutbuf_size = 0; - return 0; /* parsers must not return error codes */ + return buf_size; /* parsers must not return error codes */ } break; diff --git a/libavcodec/msrle.c b/libavcodec/msrle.c index adb55b1302c07..1ab8a419851f8 100644 --- a/libavcodec/msrle.c +++ b/libavcodec/msrle.c @@ -95,6 +95,9 @@ static int msrle_decode_frame(AVCodecContext *avctx, s->buf = buf; s->size = buf_size; + if (buf_size < 2) //Minimally a end of picture code should be there + return AVERROR_INVALIDDATA; + if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) return ret; diff --git a/libavcodec/nvdec.c b/libavcodec/nvdec.c index e779be3a45f1c..0426c9b3197f8 100644 --- a/libavcodec/nvdec.c +++ b/libavcodec/nvdec.c @@ -149,8 +149,12 @@ static void nvdec_decoder_free(void *opaque, uint8_t *data) { NVDECDecoder *decoder = (NVDECDecoder*)data; - if (decoder->decoder) + if (decoder->decoder) { + CUcontext dummy; + decoder->cudl->cuCtxPushCurrent(decoder->cuda_ctx); decoder->cvdl->cuvidDestroyDecoder(decoder->decoder); + decoder->cudl->cuCtxPopCurrent(&dummy); + } av_buffer_unref(&decoder->hw_device_ref); @@ -597,7 +601,11 @@ int ff_nvdec_frame_params(AVCodecContext *avctx, frames_ctx->format = AV_PIX_FMT_CUDA; frames_ctx->width = (avctx->coded_width + 1) & ~1; frames_ctx->height = (avctx->coded_height + 1) 
& ~1; - frames_ctx->initial_pool_size = dpb_size; + /* + * We add two extra frames to the pool to account for deinterlacing filters + * holding onto their frames. + */ + frames_ctx->initial_pool_size = dpb_size + 2; frames_ctx->free = nvdec_free_dummy; frames_ctx->pool = av_buffer_pool_init(0, nvdec_alloc_dummy); diff --git a/libavcodec/pcm.c b/libavcodec/pcm.c index 8c326c68294c7..ffcbccc77db58 100644 --- a/libavcodec/pcm.c +++ b/libavcodec/pcm.c @@ -42,6 +42,9 @@ static av_cold int pcm_encode_init(AVCodecContext *avctx) case AV_CODEC_ID_PCM_MULAW: pcm_ulaw_tableinit(); break; + case AV_CODEC_ID_PCM_VIDC: + pcm_vidc_tableinit(); + break; default: break; } @@ -216,6 +219,12 @@ static int pcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, *dst++ = linear_to_ulaw[(v + 32768) >> 2]; } break; + case AV_CODEC_ID_PCM_VIDC: + for (; n > 0; n--) { + v = *samples++; + *dst++ = linear_to_vidc[(v + 32768) >> 2]; + } + break; default: return -1; } @@ -249,6 +258,10 @@ static av_cold int pcm_decode_init(AVCodecContext *avctx) for (i = 0; i < 256; i++) s->table[i] = ulaw2linear(i); break; + case AV_CODEC_ID_PCM_VIDC: + for (i = 0; i < 256; i++) + s->table[i] = vidc2linear(i); + break; case AV_CODEC_ID_PCM_F16LE: case AV_CODEC_ID_PCM_F24LE: s->scale = 1. 
/ (1 << (avctx->bits_per_coded_sample - 1)); @@ -485,6 +498,7 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data, break; case AV_CODEC_ID_PCM_ALAW: case AV_CODEC_ID_PCM_MULAW: + case AV_CODEC_ID_PCM_VIDC: for (; n > 0; n--) { AV_WN16A(samples, s->table[*src++]); samples += 2; @@ -612,3 +626,4 @@ PCM_CODEC (PCM_U32LE, AV_SAMPLE_FMT_S32, pcm_u32le, "PCM unsigned PCM_DECODER(PCM_ZORK, AV_SAMPLE_FMT_U8, pcm_zork, "PCM Zork"); PCM_CODEC (PCM_S64BE, AV_SAMPLE_FMT_S64, pcm_s64be, "PCM signed 64-bit big-endian"); PCM_CODEC (PCM_S64LE, AV_SAMPLE_FMT_S64, pcm_s64le, "PCM signed 64-bit little-endian"); +PCM_CODEC (PCM_VIDC, AV_SAMPLE_FMT_S16, pcm_vidc, "PCM Archimedes VIDC"); diff --git a/libavcodec/pcm_tablegen.c b/libavcodec/pcm_tablegen.c index bf8e7fb707547..473a47f6d96f8 100644 --- a/libavcodec/pcm_tablegen.c +++ b/libavcodec/pcm_tablegen.c @@ -29,11 +29,13 @@ int main(void) { pcm_alaw_tableinit(); pcm_ulaw_tableinit(); + pcm_vidc_tableinit(); write_fileheader(); WRITE_ARRAY("static const", uint8_t, linear_to_alaw); WRITE_ARRAY("static const", uint8_t, linear_to_ulaw); + WRITE_ARRAY("static const", uint8_t, linear_to_vidc); return 0; } diff --git a/libavcodec/pcm_tablegen.h b/libavcodec/pcm_tablegen.h index 7ce147f768b59..d8763abc40d1c 100644 --- a/libavcodec/pcm_tablegen.h +++ b/libavcodec/pcm_tablegen.h @@ -36,6 +36,12 @@ #define BIAS (0x84) /* Bias for linear code. */ +#define VIDC_SIGN_BIT (1) +#define VIDC_QUANT_MASK (0x1E) +#define VIDC_QUANT_SHIFT (1) +#define VIDC_SEG_SHIFT (5) +#define VIDC_SEG_MASK (0xE0) + /* alaw2linear() - Convert an A-law value to 16-bit linear PCM */ static av_cold int alaw2linear(unsigned char a_val) { @@ -69,14 +75,30 @@ static av_cold int ulaw2linear(unsigned char u_val) return (u_val & SIGN_BIT) ? (BIAS - t) : (t - BIAS); } +static av_cold int vidc2linear(unsigned char u_val) +{ + int t; + + /* + * Extract and bias the quantization bits. Then + * shift up by the segment number and subtract out the bias. 
+ */ + t = (((u_val & VIDC_QUANT_MASK) >> VIDC_QUANT_SHIFT) << 3) + BIAS; + t <<= ((unsigned)u_val & VIDC_SEG_MASK) >> VIDC_SEG_SHIFT; + + return (u_val & VIDC_SIGN_BIT) ? (BIAS - t) : (t - BIAS); +} + #if CONFIG_HARDCODED_TABLES #define pcm_alaw_tableinit() #define pcm_ulaw_tableinit() +#define pcm_vidc_tableinit() #include "libavcodec/pcm_tables.h" #else /* 16384 entries per table */ static uint8_t linear_to_alaw[16384]; static uint8_t linear_to_ulaw[16384]; +static uint8_t linear_to_vidc[16384]; static av_cold void build_xlaw_table(uint8_t *linear_to_xlaw, int (*xlaw2linear)(unsigned char), @@ -111,6 +133,11 @@ static void pcm_ulaw_tableinit(void) { build_xlaw_table(linear_to_ulaw, ulaw2linear, 0xff); } + +static void pcm_vidc_tableinit(void) +{ + build_xlaw_table(linear_to_vidc, vidc2linear, 0xff); +} #endif /* CONFIG_HARDCODED_TABLES */ #endif /* AVCODEC_PCM_TABLEGEN_H */ diff --git a/libavcodec/pictordec.c b/libavcodec/pictordec.c index b29a484534150..65d2d49060b4f 100644 --- a/libavcodec/pictordec.c +++ b/libavcodec/pictordec.c @@ -236,6 +236,9 @@ static int decode_frame(AVCodecContext *avctx, } } + if (s->nb_planes - plane > 1) + return AVERROR_INVALIDDATA; + if (plane < s->nb_planes && x < avctx->width) { int run = (y + 1) * avctx->width - x; if (bits_per_plane == 8) diff --git a/libavcodec/profiles.c b/libavcodec/profiles.c index c31399f83e142..e6f937fdb4ffc 100644 --- a/libavcodec/profiles.c +++ b/libavcodec/profiles.c @@ -151,4 +151,23 @@ const AVProfile ff_sbc_profiles[] = { { FF_PROFILE_UNKNOWN }, }; +const AVProfile ff_prores_profiles[] = { + { FF_PROFILE_PRORES_PROXY, "Proxy" }, + { FF_PROFILE_PRORES_LT, "LT" }, + { FF_PROFILE_PRORES_STANDARD, "Standard" }, + { FF_PROFILE_PRORES_HQ, "HQ" }, + { FF_PROFILE_PRORES_4444, "4444" }, + { FF_PROFILE_PRORES_XQ, "XQ" }, + { FF_PROFILE_UNKNOWN } +}; + +const AVProfile ff_mjpeg_profiles[] = { + { FF_PROFILE_MJPEG_HUFFMAN_BASELINE_DCT, "Baseline" }, + { FF_PROFILE_MJPEG_HUFFMAN_EXTENDED_SEQUENTIAL_DCT, 
"Sequential" }, + { FF_PROFILE_MJPEG_HUFFMAN_PROGRESSIVE_DCT, "Progressive" }, + { FF_PROFILE_MJPEG_HUFFMAN_LOSSLESS, "Lossless" }, + { FF_PROFILE_MJPEG_JPEG_LS, "JPEG LS" }, + { FF_PROFILE_UNKNOWN } +}; + #endif /* !CONFIG_SMALL */ diff --git a/libavcodec/profiles.h b/libavcodec/profiles.h index 9d7e211e15da3..ab61e03e15331 100644 --- a/libavcodec/profiles.h +++ b/libavcodec/profiles.h @@ -33,5 +33,7 @@ extern const AVProfile ff_vc1_profiles[]; extern const AVProfile ff_vp9_profiles[]; extern const AVProfile ff_av1_profiles[]; extern const AVProfile ff_sbc_profiles[]; +extern const AVProfile ff_prores_profiles[]; +extern const AVProfile ff_mjpeg_profiles[]; #endif /* AVCODEC_PROFILES_H */ diff --git a/libavcodec/proresdec2.c b/libavcodec/proresdec2.c index d818e5d8da582..130a4e3fe8b39 100644 --- a/libavcodec/proresdec2.c +++ b/libavcodec/proresdec2.c @@ -33,6 +33,7 @@ #include "get_bits.h" #include "idctdsp.h" #include "internal.h" +#include "profiles.h" #include "simple_idct.h" #include "proresdec.h" #include "proresdata.h" @@ -61,6 +62,30 @@ static av_cold int decode_init(AVCodecContext *avctx) permute(ctx->progressive_scan, ff_prores_progressive_scan, idct_permutation); permute(ctx->interlaced_scan, ff_prores_interlaced_scan, idct_permutation); + switch (avctx->codec_tag) { + case MKTAG('a','p','c','o'): + avctx->profile = FF_PROFILE_PRORES_PROXY; + break; + case MKTAG('a','p','c','s'): + avctx->profile = FF_PROFILE_PRORES_LT; + break; + case MKTAG('a','p','c','n'): + avctx->profile = FF_PROFILE_PRORES_STANDARD; + break; + case MKTAG('a','p','c','h'): + avctx->profile = FF_PROFILE_PRORES_HQ; + break; + case MKTAG('a','p','4','h'): + avctx->profile = FF_PROFILE_PRORES_4444; + break; + case MKTAG('a','p','4','x'): + avctx->profile = FF_PROFILE_PRORES_XQ; + break; + default: + avctx->profile = FF_PROFILE_UNKNOWN; + av_log(avctx, AV_LOG_WARNING, "Unknown prores profile %d\n", avctx->codec_tag); + } + return 0; } @@ -730,4 +755,5 @@ AVCodec ff_prores_decoder = { 
.close = decode_close, .decode = decode_frame, .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS, + .profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles), }; diff --git a/libavcodec/proresenc_anatoliy.c b/libavcodec/proresenc_anatoliy.c index 6b9ce4a59ade9..f2fe98dbd9c8c 100644 --- a/libavcodec/proresenc_anatoliy.c +++ b/libavcodec/proresenc_anatoliy.c @@ -30,6 +30,7 @@ #include "avcodec.h" #include "dct.h" #include "internal.h" +#include "profiles.h" #include "proresdata.h" #include "put_bits.h" #include "bytestream.h" @@ -37,18 +38,12 @@ #define DEFAULT_SLICE_MB_WIDTH 8 -#define FF_PROFILE_PRORES_PROXY 0 -#define FF_PROFILE_PRORES_LT 1 -#define FF_PROFILE_PRORES_STANDARD 2 -#define FF_PROFILE_PRORES_HQ 3 -#define FF_PROFILE_PRORES_444 4 - static const AVProfile profiles[] = { { FF_PROFILE_PRORES_PROXY, "apco"}, { FF_PROFILE_PRORES_LT, "apcs"}, { FF_PROFILE_PRORES_STANDARD, "apcn"}, { FF_PROFILE_PRORES_HQ, "apch"}, - { FF_PROFILE_PRORES_444, "ap4h"}, + { FF_PROFILE_PRORES_4444, "ap4h"}, { FF_PROFILE_UNKNOWN } }; @@ -160,11 +155,13 @@ typedef struct { uint8_t* fill_y; uint8_t* fill_u; uint8_t* fill_v; + uint8_t* fill_a; int qmat_luma[16][64]; int qmat_chroma[16][64]; int is_422; + int need_alpha; } ProresContext; static void encode_codeword(PutBitContext *pb, int val, int codebook) @@ -366,6 +363,80 @@ static av_always_inline unsigned encode_slice_data(AVCodecContext *avctx, return *y_data_size + *u_data_size + *v_data_size; } +static void put_alpha_diff(PutBitContext *pb, int cur, int prev) +{ + const int abits = 16; + const int dbits = 7; + const int dsize = 1 << dbits - 1; + int diff = cur - prev; + + diff = av_mod_uintp2(diff, abits); + if (diff >= (1 << abits) - dsize) + diff -= 1 << abits; + if (diff < -dsize || diff > dsize || !diff) { + put_bits(pb, 1, 1); + put_bits(pb, abits, diff); + } else { + put_bits(pb, 1, 0); + put_bits(pb, dbits - 1, FFABS(diff) - 1); + put_bits(pb, 1, diff < 0); + } +} + +static inline void 
put_alpha_run(PutBitContext *pb, int run) +{ + if (run) { + put_bits(pb, 1, 0); + if (run < 0x10) + put_bits(pb, 4, run); + else + put_bits(pb, 15, run); + } else { + put_bits(pb, 1, 1); + } +} + +static av_always_inline int encode_alpha_slice_data(AVCodecContext *avctx, int8_t * src_a, + unsigned mb_count, uint8_t *buf, unsigned data_size, unsigned* a_data_size) +{ + const int abits = 16; + const int mask = (1 << abits) - 1; + const int num_coeffs = mb_count * 256; + int prev = mask, cur; + int idx = 0; + int run = 0; + int16_t * blocks = (int16_t *)src_a; + PutBitContext pb; + init_put_bits(&pb, buf, data_size); + + cur = blocks[idx++]; + put_alpha_diff(&pb, cur, prev); + prev = cur; + do { + cur = blocks[idx++]; + if (cur != prev) { + put_alpha_run (&pb, run); + put_alpha_diff(&pb, cur, prev); + prev = cur; + run = 0; + } else { + run++; + } + } while (idx < num_coeffs); + if (run) + put_alpha_run(&pb, run); + flush_put_bits(&pb); + *a_data_size = put_bits_count(&pb) >> 3; + + if (put_bits_left(&pb) < 0) { + av_log(avctx, AV_LOG_ERROR, + "Underestimated required buffer size.\n"); + return AVERROR_BUG; + } else { + return 0; + } +} + static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y, unsigned stride, unsigned width, unsigned height, uint16_t *dst, unsigned dst_width, unsigned dst_height) @@ -396,15 +467,46 @@ static void subimage_with_fill(uint16_t *src, unsigned x, unsigned y, } } +/* reorganize alpha data and convert 10b -> 16b */ +static void subimage_alpha_with_fill(uint16_t *src, unsigned x, unsigned y, + unsigned stride, unsigned width, unsigned height, uint16_t *dst, + unsigned dst_width, unsigned dst_height) +{ + int box_width = FFMIN(width - x, dst_width); + int box_height = FFMIN(height - y, dst_height); + int i, j, src_stride = stride >> 1; + uint16_t last_pix, *last_line; + + src += y * src_stride + x; + for (i = 0; i < box_height; ++i) { + for (j = 0; j < box_width; ++j) { + dst[j] = src[j] << 6; /* 10b to 16b */ + } + last_pix = 
dst[j - 1] << 6; /* 10b to 16b */ + for (; j < dst_width; j++) + dst[j] = last_pix; + src += src_stride; + dst += dst_width; + } + last_line = dst - dst_width; + for (; i < dst_height; i++) { + for (j = 0; j < dst_width; ++j) { + dst[j] = last_line[j]; + } + dst += dst_width; + } +} + static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x, int mb_y, unsigned mb_count, uint8_t *buf, unsigned data_size, int unsafe, int *qp) { - int luma_stride, chroma_stride; - int hdr_size = 6, slice_size; - uint8_t *dest_y, *dest_u, *dest_v; - unsigned y_data_size = 0, u_data_size = 0, v_data_size = 0; + int luma_stride, chroma_stride, alpha_stride = 0; ProresContext* ctx = avctx->priv_data; + int hdr_size = 6 + (ctx->need_alpha * 2); /* v data size is write when there is alpha */ + int ret = 0, slice_size; + uint8_t *dest_y, *dest_u, *dest_v; + unsigned y_data_size = 0, u_data_size = 0, v_data_size = 0, a_data_size = 0; FDCTDSPContext *fdsp = &ctx->fdsp; int tgt_bits = (mb_count * bitrate_table[avctx->profile]) >> 2; int low_bytes = (tgt_bits - (tgt_bits >> 3)) >> 3; // 12% bitrate fluctuation @@ -417,6 +519,9 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x, luma_stride = pic->linesize[0]; chroma_stride = pic->linesize[1]; + if (ctx->need_alpha) + alpha_stride = pic->linesize[3]; + dest_y = pic->data[0] + (mb_y << 4) * luma_stride + (mb_x << 5); dest_u = pic->data[1] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422)); dest_v = pic->data[2] + (mb_y << 4) * chroma_stride + (mb_x << (5 - ctx->is_422)); @@ -436,7 +541,7 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x, calc_plane_dct(fdsp, ctx->fill_u, blocks_u, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422); calc_plane_dct(fdsp, ctx->fill_v, blocks_v, mb_count << (5 - ctx->is_422), mb_count, 1, ctx->is_422); - encode_slice_data(avctx, blocks_y, blocks_u, blocks_v, + slice_size = encode_slice_data(avctx, blocks_y, blocks_u, blocks_v, 
mb_count, buf + hdr_size, data_size - hdr_size, &y_data_size, &u_data_size, &v_data_size, *qp); @@ -475,7 +580,21 @@ static int encode_slice(AVCodecContext *avctx, const AVFrame *pic, int mb_x, AV_WB16(buf + 2, y_data_size); AV_WB16(buf + 4, u_data_size); - return hdr_size + y_data_size + u_data_size + v_data_size; + if (ctx->need_alpha) { + AV_WB16(buf + 6, v_data_size); /* write v data size only if there is alpha */ + + subimage_alpha_with_fill((uint16_t *) pic->data[3], mb_x << 4, mb_y << 4, + alpha_stride, avctx->width, avctx->height, + (uint16_t *) ctx->fill_a, mb_count << 4, 16); + ret = encode_alpha_slice_data(avctx, ctx->fill_a, mb_count, + buf + hdr_size + slice_size, + data_size - hdr_size - slice_size, &a_data_size); + } + + if (ret != 0) { + return ret; + } + return hdr_size + y_data_size + u_data_size + v_data_size + a_data_size; } static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic, @@ -510,6 +629,9 @@ static int prores_encode_picture(AVCodecContext *avctx, const AVFrame *pic, sl_size = encode_slice(avctx, pic, mb_x, mb_y, slice_mb_count, sl_data, sl_data_size, unsafe_bot || unsafe_right, &qp); + if (sl_size < 0){ + return sl_size; + } bytestream_put_be16(&sl_data_sizes, sl_size); sl_data += sl_size; @@ -541,27 +663,38 @@ static int prores_encode_frame(AVCodecContext *avctx, AVPacket *pkt, buf = pkt->data; pic_size = prores_encode_picture(avctx, pict, buf + header_size + 8, pkt->size - header_size - 8); + if (pic_size < 0) { + return pic_size; + } bytestream_put_be32(&buf, pic_size + 8 + header_size); bytestream_put_buffer(&buf, "icpf", 4); bytestream_put_be16(&buf, header_size); - bytestream_put_be16(&buf, 0); + bytestream_put_be16(&buf, 0); /* version */ bytestream_put_buffer(&buf, "fmpg", 4); bytestream_put_be16(&buf, avctx->width); bytestream_put_be16(&buf, avctx->height); - if (avctx->profile == FF_PROFILE_PRORES_444) { + if (avctx->profile == FF_PROFILE_PRORES_4444) { *buf++ = 0xC2; // 444, not interlaced } else { *buf++ = 
0x82; // 422, not interlaced } - *buf++ = 0; + *buf++ = 0; /* reserved */ *buf++ = pict->color_primaries; *buf++ = pict->color_trc; *buf++ = pict->colorspace; - *buf++ = 32; - *buf++ = 0; - *buf++ = 3; + if (avctx->profile >= FF_PROFILE_PRORES_4444) { + if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10) { + *buf++ = 0xA0;/* src b64a and no alpha */ + } else { + *buf++ = 0xA2;/* src b64a and 16b alpha */ + } + } else { + *buf++ = 32;/* src v210 and no alpha */ + } + *buf++ = 0; /* reserved */ + *buf++ = 3; /* luma and chroma matrix present */ bytestream_put_buffer(&buf, QMAT_LUMA[avctx->profile], 64); bytestream_put_buffer(&buf, QMAT_CHROMA[avctx->profile], 64); @@ -586,6 +719,7 @@ static av_cold int prores_encode_init(AVCodecContext *avctx) ProresContext* ctx = avctx->priv_data; avctx->bits_per_raw_sample = 10; + ctx->need_alpha = 0; if (avctx->width & 0x1) { av_log(avctx, AV_LOG_ERROR, @@ -605,13 +739,19 @@ static av_cold int prores_encode_init(AVCodecContext *avctx) av_log(avctx, AV_LOG_INFO, "encoding with ProRes standard (apcn) profile\n"); } else if (avctx->pix_fmt == AV_PIX_FMT_YUV444P10) { - avctx->profile = FF_PROFILE_PRORES_444; + avctx->profile = FF_PROFILE_PRORES_4444; av_log(avctx, AV_LOG_INFO, - "encoding with ProRes 444 (ap4h) profile\n"); + "encoding with ProRes 4444 (ap4h) profile\n"); + } else if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) { + avctx->profile = FF_PROFILE_PRORES_4444; + av_log(avctx, AV_LOG_INFO, + "encoding with ProRes 4444+ (ap4h) profile\n"); + } else { + av_log(avctx, AV_LOG_ERROR, "Unknown pixel format\n"); + return AVERROR(EINVAL); } - } else if (avctx->profile < FF_PROFILE_PRORES_PROXY - || avctx->profile > FF_PROFILE_PRORES_444) { + || avctx->profile > FF_PROFILE_PRORES_4444) { av_log( avctx, AV_LOG_ERROR, @@ -622,13 +762,14 @@ static av_cold int prores_encode_init(AVCodecContext *avctx) av_log(avctx, AV_LOG_ERROR, "encoding with ProRes 444 (ap4h) profile, need YUV444P10 input\n"); return AVERROR(EINVAL); - } else if ((avctx->pix_fmt 
== AV_PIX_FMT_YUV444P10) && (avctx->profile < FF_PROFILE_PRORES_444)){ + } else if ((avctx->pix_fmt == AV_PIX_FMT_YUV444P10 || avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) + && (avctx->profile < FF_PROFILE_PRORES_4444)){ av_log(avctx, AV_LOG_ERROR, "encoding with ProRes Proxy/LT/422/422 HQ (apco, apcs, apcn, ap4h) profile, need YUV422P10 input\n"); return AVERROR(EINVAL); } - if (avctx->profile < FF_PROFILE_PRORES_444) { /* 422 versions */ + if (avctx->profile < FF_PROFILE_PRORES_4444) { /* 422 versions */ ctx->is_422 = 1; if ((avctx->height & 0xf) || (avctx->width & 0xf)) { ctx->fill_y = av_malloc(4 * (DEFAULT_SLICE_MB_WIDTH << 8)); @@ -646,6 +787,12 @@ static av_cold int prores_encode_init(AVCodecContext *avctx) ctx->fill_u = ctx->fill_y + (DEFAULT_SLICE_MB_WIDTH << 9); ctx->fill_v = ctx->fill_u + (DEFAULT_SLICE_MB_WIDTH << 9); } + if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P10) { + ctx->need_alpha = 1; + ctx->fill_a = av_malloc(DEFAULT_SLICE_MB_WIDTH << 9); /* 8 blocks x 16px x 16px x sizeof (uint16) */ + if (!ctx->fill_a) + return AVERROR(ENOMEM); + } } ff_fdctdsp_init(&ctx->fdsp, avctx); @@ -664,6 +811,7 @@ static av_cold int prores_encode_close(AVCodecContext *avctx) { ProresContext* ctx = avctx->priv_data; av_freep(&ctx->fill_y); + av_freep(&ctx->fill_a); return 0; } @@ -677,9 +825,9 @@ AVCodec ff_prores_aw_encoder = { .init = prores_encode_init, .close = prores_encode_close, .encode2 = prores_encode_frame, - .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_NONE}, + .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_NONE}, .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY, - .profiles = profiles + .profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles), }; AVCodec ff_prores_encoder = { @@ -691,7 +839,7 @@ AVCodec ff_prores_encoder = { .init = prores_encode_init, .close = prores_encode_close, .encode2 = prores_encode_frame, - .pix_fmts = 
(const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_NONE}, + .pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_NONE}, .capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY, - .profiles = profiles + .profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles), }; diff --git a/libavcodec/proresenc_kostya.c b/libavcodec/proresenc_kostya.c index 81f3865ea6ef4..9a77d24fb62e5 100644 --- a/libavcodec/proresenc_kostya.c +++ b/libavcodec/proresenc_kostya.c @@ -28,6 +28,7 @@ #include "avcodec.h" #include "fdctdsp.h" #include "put_bits.h" +#include "profiles.h" #include "bytestream.h" #include "internal.h" #include "proresdata.h" @@ -1431,4 +1432,5 @@ AVCodec ff_prores_ks_encoder = { AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_NONE }, .priv_class = &proresenc_class, + .profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles), }; diff --git a/libavcodec/prosumer.c b/libavcodec/prosumer.c index 6e98677b556d0..505de71980103 100644 --- a/libavcodec/prosumer.c +++ b/libavcodec/prosumer.c @@ -57,27 +57,25 @@ static int decompress(GetByteContext *gb, int size, PutByteContext *pb, const ui b = lut[2 * idx]; while (1) { - if (bytestream2_get_bytes_left_p(pb) <= 0) + if (bytestream2_get_bytes_left_p(pb) <= 0 || bytestream2_get_eof(pb)) return 0; - if (((b & 0xFF00u) != 0x8000u) || (b & 0xFFu)) { + if ((b & 0xFF00u) != 0x8000u || (b & 0xFFu)) { if ((b & 0xFF00u) != 0x8000u) { bytestream2_put_le16(pb, b); - } else if (b & 0xFFu) { + } else { idx = 0; for (int i = 0; i < (b & 0xFFu); i++) bytestream2_put_le32(pb, 0); } c = b >> 16; if (c & 0xFF00u) { - c = (((c >> 8) & 0xFFu) | (c & 0xFF00)) & 0xF00F; fill = lut[2 * idx + 1]; - if ((c & 0xFF00u) == 0x1000) { + if ((c & 0xF000u) == 0x1000) { bytestream2_put_le16(pb, fill); - c &= 0xFFFF00FFu; } else { bytestream2_put_le32(pb, fill); - c &= 0xFFFF00FFu; } + c = (c >> 8) & 0x0Fu; } while (c) { a <<= 4; diff --git a/libavcodec/qpeg.c 
b/libavcodec/qpeg.c index e1210c1972060..cb452621e7ddd 100644 --- a/libavcodec/qpeg.c +++ b/libavcodec/qpeg.c @@ -85,6 +85,12 @@ static void qpeg_decode_intra(QpegContext *qctx, uint8_t *dst, filled = 0; dst -= stride; rows_to_go--; + while (run - i > width && rows_to_go > 0) { + memset(dst, p, width); + dst -= stride; + rows_to_go--; + i += width; + } if(rows_to_go <= 0) break; } diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c index 22e7a46a851dc..6753e596a15d6 100644 --- a/libavcodec/qsvdec.c +++ b/libavcodec/qsvdec.c @@ -372,6 +372,8 @@ static int qsv_decode(AVCodecContext *avctx, QSVContext *q, ++q->zero_consume_run; if (q->zero_consume_run > 1) ff_qsv_print_warning(avctx, ret, "A decode call did not consume any data"); + } else if (!*sync && bs.DataOffset) { + ++q->buffered_count; } else { q->zero_consume_run = 0; } @@ -526,6 +528,16 @@ int ff_qsv_process_data(AVCodecContext *avctx, QSVContext *q, AV_PIX_FMT_NONE, AV_PIX_FMT_NONE }; enum AVPixelFormat qsv_format; + AVPacket zero_pkt = {0}; + + if (q->buffered_count) { + q->reinit_flag = 1; + /* decode zero-size pkt to flush the buffered pkt before reinit */ + q->buffered_count--; + return qsv_decode(avctx, q, frame, got_frame, &zero_pkt); + } + + q->reinit_flag = 0; qsv_format = ff_qsv_map_pixfmt(q->parser->format, &q->fourcc); if (qsv_format < 0) { diff --git a/libavcodec/qsvdec.h b/libavcodec/qsvdec.h index 5b7b03a48b5cd..111536caba335 100644 --- a/libavcodec/qsvdec.h +++ b/libavcodec/qsvdec.h @@ -53,6 +53,8 @@ typedef struct QSVContext { AVFifoBuffer *async_fifo; int zero_consume_run; + int buffered_count; + int reinit_flag; // the internal parser and codec context for parsing the data AVCodecParserContext *parser; diff --git a/libavcodec/qsvdec_h2645.c b/libavcodec/qsvdec_h2645.c index d9d2318d1a423..b8a78aa81b5a7 100644 --- a/libavcodec/qsvdec_h2645.c +++ b/libavcodec/qsvdec_h2645.c @@ -146,10 +146,11 @@ static int qsv_decode_frame(AVCodecContext *avctx, void *data, /* no more data */ if 
(av_fifo_size(s->packet_fifo) < sizeof(AVPacket)) return avpkt->size ? avpkt->size : ff_qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt); - - av_packet_unref(&s->buffer_pkt); - - av_fifo_generic_read(s->packet_fifo, &s->buffer_pkt, sizeof(s->buffer_pkt), NULL); + /* in progress of reinit, no read from fifo and keep the buffer_pkt */ + if (!s->qsv.reinit_flag) { + av_packet_unref(&s->buffer_pkt); + av_fifo_generic_read(s->packet_fifo, &s->buffer_pkt, sizeof(s->buffer_pkt), NULL); + } } ret = ff_qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->buffer_pkt); @@ -159,6 +160,8 @@ static int qsv_decode_frame(AVCodecContext *avctx, void *data, av_packet_unref(&s->buffer_pkt); return ret; } + if (s->qsv.reinit_flag) + continue; s->buffer_pkt.size -= ret; s->buffer_pkt.data += ret; diff --git a/libavcodec/qsvdec_other.c b/libavcodec/qsvdec_other.c index 993c7a8e807f0..03251d2c8536d 100644 --- a/libavcodec/qsvdec_other.c +++ b/libavcodec/qsvdec_other.c @@ -132,9 +132,11 @@ static int qsv_decode_frame(AVCodecContext *avctx, void *data, /* no more data */ if (av_fifo_size(s->packet_fifo) < sizeof(AVPacket)) return avpkt->size ? 
avpkt->size : ff_qsv_process_data(avctx, &s->qsv, frame, got_frame, avpkt); - - av_packet_unref(&s->input_ref); - av_fifo_generic_read(s->packet_fifo, &s->input_ref, sizeof(s->input_ref), NULL); + /* in progress of reinit, no read from fifo and keep the buffer_pkt */ + if (!s->qsv.reinit_flag) { + av_packet_unref(&s->input_ref); + av_fifo_generic_read(s->packet_fifo, &s->input_ref, sizeof(s->input_ref), NULL); + } } ret = ff_qsv_process_data(avctx, &s->qsv, frame, got_frame, &s->input_ref); @@ -145,6 +147,8 @@ static int qsv_decode_frame(AVCodecContext *avctx, void *data, return ret; } + if (s->qsv.reinit_flag) + continue; s->input_ref.size -= ret; s->input_ref.data += ret; diff --git a/libavcodec/qsvenc_jpeg.c b/libavcodec/qsvenc_jpeg.c index c18fe91940bb3..1e7785a8261de 100644 --- a/libavcodec/qsvenc_jpeg.c +++ b/libavcodec/qsvenc_jpeg.c @@ -64,6 +64,7 @@ static av_cold int qsv_enc_close(AVCodecContext *avctx) #define OFFSET(x) offsetof(QSVMJPEGEncContext, x) #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM static const AVOption options[] = { + { "async_depth", "Maximum processing parallelism", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = ASYNC_DEPTH_DEFAULT }, 1, INT_MAX, VE }, { NULL }, }; @@ -74,6 +75,11 @@ static const AVClass class = { .version = LIBAVUTIL_VERSION_INT, }; +static const AVCodecDefault qsv_enc_defaults[] = { + { "global_quality", "80" }, + { NULL }, +}; + AVCodec ff_mjpeg_qsv_encoder = { .name = "mjpeg_qsv", .long_name = NULL_IF_CONFIG_SMALL("MJPEG (Intel Quick Sync Video acceleration)"), @@ -88,5 +94,6 @@ AVCodec ff_mjpeg_qsv_encoder = { AV_PIX_FMT_QSV, AV_PIX_FMT_NONE }, .priv_class = &class, + .defaults = qsv_enc_defaults, .wrapper_name = "qsv", }; diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 285bfdbc63cb4..1661d48b90631 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -1438,6 +1438,7 @@ int av_get_exact_bits_per_sample(enum AVCodecID codec_id) case AV_CODEC_ID_DSD_MSBF_PLANAR: case 
AV_CODEC_ID_PCM_ALAW: case AV_CODEC_ID_PCM_MULAW: + case AV_CODEC_ID_PCM_VIDC: case AV_CODEC_ID_PCM_S8: case AV_CODEC_ID_PCM_S8_PLANAR: case AV_CODEC_ID_PCM_U8: diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c index 2c34cdce2cf59..2fe8501287108 100644 --- a/libavcodec/vaapi_encode.c +++ b/libavcodec/vaapi_encode.c @@ -319,16 +319,60 @@ static int vaapi_encode_issue(AVCodecContext *avctx, } } + if (pic->nb_slices == 0) + pic->nb_slices = ctx->nb_slices; if (pic->nb_slices > 0) { + int rounding; + pic->slices = av_mallocz_array(pic->nb_slices, sizeof(*pic->slices)); if (!pic->slices) { err = AVERROR(ENOMEM); goto fail; } + + for (i = 0; i < pic->nb_slices; i++) + pic->slices[i].row_size = ctx->slice_size; + + rounding = ctx->slice_block_rows - ctx->nb_slices * ctx->slice_size; + if (rounding > 0) { + // Place rounding error at top and bottom of frame. + av_assert0(rounding < pic->nb_slices); + // Some Intel drivers contain a bug where the encoder will fail + // if the last slice is smaller than the one before it. Since + // that's straightforward to avoid here, just do so. + if (rounding <= 2) { + for (i = 0; i < rounding; i++) + ++pic->slices[i].row_size; + } else { + for (i = 0; i < (rounding + 1) / 2; i++) + ++pic->slices[pic->nb_slices - i - 1].row_size; + for (i = 0; i < rounding / 2; i++) + ++pic->slices[i].row_size; + } + } else if (rounding < 0) { + // Remove rounding error from last slice only. 
+ av_assert0(rounding < ctx->slice_size); + pic->slices[pic->nb_slices - 1].row_size += rounding; + } } for (i = 0; i < pic->nb_slices; i++) { slice = &pic->slices[i]; slice->index = i; + if (i == 0) { + slice->row_start = 0; + slice->block_start = 0; + } else { + const VAAPIEncodeSlice *prev = &pic->slices[i - 1]; + slice->row_start = prev->row_start + prev->row_size; + slice->block_start = prev->block_start + prev->block_size; + } + slice->block_size = slice->row_size * ctx->slice_block_cols; + + av_log(avctx, AV_LOG_DEBUG, "Slice %d: %d-%d (%d rows), " + "%d-%d (%d blocks).\n", i, slice->row_start, + slice->row_start + slice->row_size - 1, slice->row_size, + slice->block_start, slice->block_start + slice->block_size - 1, + slice->block_size); if (ctx->codec->slice_params_size > 0) { slice->codec_slice_params = av_mallocz(ctx->codec->slice_params_size); @@ -1233,7 +1277,8 @@ static av_cold int vaapi_encode_init_rate_control(AVCodecContext *avctx) ctx->va_rc_mode = VA_RC_CQP; return 0; } - if (avctx->flags & AV_CODEC_FLAG_QSCALE || + if (ctx->codec->flags & FLAG_CONSTANT_QUALITY_ONLY || + avctx->flags & AV_CODEC_FLAG_QSCALE || avctx->bit_rate <= 0) { if (rc_attr.value & VA_RC_CQP) { av_log(avctx, AV_LOG_VERBOSE, "Using constant-quality mode.\n"); @@ -1444,6 +1489,106 @@ static av_cold int vaapi_encode_init_gop_structure(AVCodecContext *avctx) return 0; } +static av_cold int vaapi_encode_init_slice_structure(AVCodecContext *avctx) +{ + VAAPIEncodeContext *ctx = avctx->priv_data; + VAConfigAttrib attr[2] = { { VAConfigAttribEncMaxSlices }, + { VAConfigAttribEncSliceStructure } }; + VAStatus vas; + uint32_t max_slices, slice_structure; + int req_slices; + + if (!(ctx->codec->flags & FLAG_SLICE_CONTROL)) { + if (avctx->slices > 0) { + av_log(avctx, AV_LOG_WARNING, "Multiple slices were requested " + "but this codec does not support controlling slices.\n"); + } + return 0; + } + + ctx->slice_block_rows = (avctx->height + ctx->slice_block_height - 1) / + 
ctx->slice_block_height; + ctx->slice_block_cols = (avctx->width + ctx->slice_block_width - 1) / + ctx->slice_block_width; + + if (avctx->slices <= 1) { + ctx->nb_slices = 1; + ctx->slice_size = ctx->slice_block_rows; + return 0; + } + + vas = vaGetConfigAttributes(ctx->hwctx->display, + ctx->va_profile, + ctx->va_entrypoint, + attr, FF_ARRAY_ELEMS(attr)); + if (vas != VA_STATUS_SUCCESS) { + av_log(avctx, AV_LOG_ERROR, "Failed to query slice " + "attributes: %d (%s).\n", vas, vaErrorStr(vas)); + return AVERROR_EXTERNAL; + } + max_slices = attr[0].value; + slice_structure = attr[1].value; + if (max_slices == VA_ATTRIB_NOT_SUPPORTED || + slice_structure == VA_ATTRIB_NOT_SUPPORTED) { + av_log(avctx, AV_LOG_ERROR, "Driver does not support encoding " + "pictures as multiple slices.\n."); + return AVERROR(EINVAL); + } + + // For fixed-size slices currently we only support whole rows, making + // rectangular slices. This could be extended to arbitrary runs of + // blocks, but since slices tend to be a conformance requirement and + // most cases (such as broadcast or bluray) want rectangular slices + // only it would need to be gated behind another option. 
+ if (avctx->slices > ctx->slice_block_rows) { + av_log(avctx, AV_LOG_WARNING, "Not enough rows to use " + "configured number of slices (%d < %d); using " + "maximum.\n", ctx->slice_block_rows, avctx->slices); + req_slices = ctx->slice_block_rows; + } else { + req_slices = avctx->slices; + } + if (slice_structure & VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS || + slice_structure & VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS) { + ctx->nb_slices = req_slices; + ctx->slice_size = ctx->slice_block_rows / ctx->nb_slices; + } else if (slice_structure & VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS) { + int k; + for (k = 1;; k *= 2) { + if (2 * k * (req_slices - 1) + 1 >= ctx->slice_block_rows) + break; + } + ctx->nb_slices = (ctx->slice_block_rows + k - 1) / k; + ctx->slice_size = k; +#if VA_CHECK_VERSION(1, 0, 0) + } else if (slice_structure & VA_ENC_SLICE_STRUCTURE_EQUAL_ROWS) { + ctx->nb_slices = ctx->slice_block_rows; + ctx->slice_size = 1; +#endif + } else { + av_log(avctx, AV_LOG_ERROR, "Driver does not support any usable " + "slice structure modes (%#x).\n", slice_structure); + return AVERROR(EINVAL); + } + + if (ctx->nb_slices > avctx->slices) { + av_log(avctx, AV_LOG_WARNING, "Slice count rounded up to " + "%d (from %d) due to driver constraints on slice " + "structure.\n", ctx->nb_slices, avctx->slices); + } + if (ctx->nb_slices > max_slices) { + av_log(avctx, AV_LOG_ERROR, "Driver does not support " + "encoding with %d slices (max %"PRIu32").\n", + ctx->nb_slices, max_slices); + return AVERROR(EINVAL); + } + + av_log(avctx, AV_LOG_VERBOSE, "Encoding pictures with %d slices " + "(default size %d block rows).\n", + ctx->nb_slices, ctx->slice_size); + return 0; +} + static av_cold int vaapi_encode_init_packed_headers(AVCodecContext *avctx) { VAAPIEncodeContext *ctx = avctx->priv_data; @@ -1734,6 +1879,10 @@ av_cold int ff_vaapi_encode_init(AVCodecContext *avctx) if (err < 0) goto fail; + err = vaapi_encode_init_slice_structure(avctx); + if (err < 0) + goto fail; + err = 
vaapi_encode_init_packed_headers(avctx); if (err < 0) goto fail; diff --git a/libavcodec/vaapi_encode.h b/libavcodec/vaapi_encode.h index 091889f9ae83c..965fe65c0b63a 100644 --- a/libavcodec/vaapi_encode.h +++ b/libavcodec/vaapi_encode.h @@ -52,6 +52,10 @@ enum { typedef struct VAAPIEncodeSlice { int index; + int row_start; + int row_size; + int block_start; + int block_size; void *priv_data; void *codec_slice_params; } VAAPIEncodeSlice; @@ -125,6 +129,10 @@ typedef struct VAAPIEncodeContext { int surface_width; int surface_height; + // The block size for slice calculations. + int slice_block_width; + int slice_block_height; + // Everything above this point must be set before calling // ff_vaapi_encode_init(). @@ -224,6 +232,12 @@ typedef struct VAAPIEncodeContext { int64_t dts_pts_diff; int64_t ts_ring[MAX_REORDER_DELAY * 3]; + // Slice structure. + int slice_block_rows; + int slice_block_cols; + int nb_slices; + int slice_size; + // Frame type decision. int gop_size; int p_per_i; @@ -234,11 +248,21 @@ typedef struct VAAPIEncodeContext { int end_of_stream; } VAAPIEncodeContext; +enum { + // Codec supports controlling the subdivision of pictures into slices. + FLAG_SLICE_CONTROL = 1 << 0, + // Codec only supports constant quality (no rate control). + FLAG_CONSTANT_QUALITY_ONLY = 1 << 1, +}; + typedef struct VAAPIEncodeType { // List of supported profiles and corresponding VAAPI profiles. // (Must end with FF_PROFILE_UNKNOWN.) const VAAPIEncodeProfile *profiles; + // Codec feature flags. + int flags; + // Perform any extra codec-specific configuration after the // codec context is initialised (set up the private data and // add any necessary global parameters). 
diff --git a/libavcodec/vaapi_encode_h264.c b/libavcodec/vaapi_encode_h264.c index 8feae0d42fb10..7bb77cfba2ed2 100644 --- a/libavcodec/vaapi_encode_h264.c +++ b/libavcodec/vaapi_encode_h264.c @@ -733,8 +733,6 @@ static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx, vpic->pic_fields.bits.idr_pic_flag = (pic->type == PICTURE_TYPE_IDR); vpic->pic_fields.bits.reference_pic_flag = (pic->type != PICTURE_TYPE_B); - pic->nb_slices = 1; - return 0; } @@ -758,8 +756,7 @@ static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx, sh->nal_unit_header.nal_ref_idc = pic->type != PICTURE_TYPE_B; } - // Only one slice per frame. - sh->first_mb_in_slice = 0; + sh->first_mb_in_slice = slice->block_start; sh->slice_type = priv->slice_type; sh->pic_parameter_set_id = pps->pic_parameter_set_id; @@ -780,8 +777,8 @@ static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx, sh->slice_qp_delta = priv->fixed_qp_idr - (pps->pic_init_qp_minus26 + 26); - vslice->macroblock_address = sh->first_mb_in_slice; - vslice->num_macroblocks = priv->mb_width * priv->mb_height; + vslice->macroblock_address = slice->block_start; + vslice->num_macroblocks = slice->block_size; vslice->macroblock_info = VA_INVALID_ID; @@ -903,6 +900,8 @@ static const VAAPIEncodeProfile vaapi_encode_h264_profiles[] = { static const VAAPIEncodeType vaapi_encode_type_h264 = { .profiles = vaapi_encode_h264_profiles, + .flags = FLAG_SLICE_CONTROL, + .configure = &vaapi_encode_h264_configure, .sequence_params_size = sizeof(VAEncSequenceParameterBufferH264), @@ -978,6 +977,8 @@ static av_cold int vaapi_encode_h264_init(AVCodecContext *avctx) ctx->surface_width = FFALIGN(avctx->width, 16); ctx->surface_height = FFALIGN(avctx->height, 16); + ctx->slice_block_height = ctx->slice_block_width = 16; + return ff_vaapi_encode_init(avctx); } diff --git a/libavcodec/vaapi_encode_h265.c b/libavcodec/vaapi_encode_h265.c index 10312fbd60a39..367fa5fde2d65 100644 --- a/libavcodec/vaapi_encode_h265.c +++ 
b/libavcodec/vaapi_encode_h265.c @@ -54,9 +54,6 @@ typedef struct VAAPIEncodeH265Context { int sei; // Derived settings. - unsigned int ctu_width; - unsigned int ctu_height; - int fixed_qp_idr; int fixed_qp_p; int fixed_qp_b; @@ -349,7 +346,8 @@ static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx) level = ff_h265_guess_level(ptl, avctx->bit_rate, ctx->surface_width, ctx->surface_height, - 1, 1, 1, (ctx->b_per_p > 0) + 1); + ctx->nb_slices, 1, 1, + (ctx->b_per_p > 0) + 1); if (level) { av_log(avctx, AV_LOG_VERBOSE, "Using level %s.\n", level->name); ptl->general_level_idc = level->level_idc; @@ -850,8 +848,6 @@ static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx, av_assert0(0 && "invalid picture type"); } - pic->nb_slices = 1; - return 0; } @@ -876,9 +872,8 @@ static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx, sh->slice_pic_parameter_set_id = pps->pps_pic_parameter_set_id; - // Currently we only support one slice per frame. - sh->first_slice_segment_in_pic_flag = 1; - sh->slice_segment_address = 0; + sh->first_slice_segment_in_pic_flag = slice->index == 0; + sh->slice_segment_address = slice->block_start; sh->slice_type = priv->slice_type; @@ -968,7 +963,7 @@ static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx, *vslice = (VAEncSliceParameterBufferHEVC) { .slice_segment_address = sh->slice_segment_address, - .num_ctu_in_slice = priv->ctu_width * priv->ctu_height, + .num_ctu_in_slice = slice->block_size, .slice_type = sh->slice_type, .slice_pic_parameter_set_id = sh->slice_pic_parameter_set_id, @@ -989,7 +984,7 @@ static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx, .slice_tc_offset_div2 = sh->slice_tc_offset_div2, .slice_fields.bits = { - .last_slice_of_pic_flag = 1, + .last_slice_of_pic_flag = slice->index == pic->nb_slices - 1, .dependent_slice_segment_flag = sh->dependent_slice_segment_flag, .colour_plane_id = sh->colour_plane_id, .slice_temporal_mvp_enabled_flag = @@ 
-1041,13 +1036,6 @@ static av_cold int vaapi_encode_h265_configure(AVCodecContext *avctx) if (err < 0) return err; - priv->ctu_width = FFALIGN(ctx->surface_width, 32) / 32; - priv->ctu_height = FFALIGN(ctx->surface_height, 32) / 32; - - av_log(avctx, AV_LOG_VERBOSE, "Input %ux%u -> Surface %ux%u -> CTU %ux%u.\n", - avctx->width, avctx->height, ctx->surface_width, - ctx->surface_height, priv->ctu_width, priv->ctu_height); - if (ctx->va_rc_mode == VA_RC_CQP) { priv->fixed_qp_p = priv->qp; if (avctx->i_quant_factor > 0.0) @@ -1092,6 +1080,8 @@ static const VAAPIEncodeProfile vaapi_encode_h265_profiles[] = { static const VAAPIEncodeType vaapi_encode_type_h265 = { .profiles = vaapi_encode_h265_profiles, + .flags = FLAG_SLICE_CONTROL, + .configure = &vaapi_encode_h265_configure, .sequence_params_size = sizeof(VAEncSequenceParameterBufferHEVC), @@ -1138,6 +1128,9 @@ static av_cold int vaapi_encode_h265_init(AVCodecContext *avctx) ctx->surface_width = FFALIGN(avctx->width, 16); ctx->surface_height = FFALIGN(avctx->height, 16); + // CTU size is currently hard-coded to 32. + ctx->slice_block_width = ctx->slice_block_height = 32; + return ff_vaapi_encode_init(avctx); } diff --git a/libavcodec/vaapi_encode_mjpeg.c b/libavcodec/vaapi_encode_mjpeg.c index fe8439ce88000..79f43473f52ea 100644 --- a/libavcodec/vaapi_encode_mjpeg.c +++ b/libavcodec/vaapi_encode_mjpeg.c @@ -476,6 +476,8 @@ static const VAAPIEncodeProfile vaapi_encode_mjpeg_profiles[] = { static const VAAPIEncodeType vaapi_encode_type_mjpeg = { .profiles = vaapi_encode_mjpeg_profiles, + .flags = FLAG_CONSTANT_QUALITY_ONLY, + .configure = &vaapi_encode_mjpeg_configure, .picture_params_size = sizeof(VAEncPictureParameterBufferJPEG), diff --git a/libavcodec/vaapi_encode_mpeg2.c b/libavcodec/vaapi_encode_mpeg2.c index 1377eeb9bbd9c..22d7e306bb4f3 100644 --- a/libavcodec/vaapi_encode_mpeg2.c +++ b/libavcodec/vaapi_encode_mpeg2.c @@ -35,9 +35,6 @@ typedef struct VAAPIEncodeMPEG2Context { int level; // Derived settings. 
- int mb_width; - int mb_height; - int quant_i; int quant_p; int quant_b; @@ -477,8 +474,6 @@ static int vaapi_encode_mpeg2_init_picture_params(AVCodecContext *avctx, vpic->f_code[1][0] = pce->f_code[1][0]; vpic->f_code[1][1] = pce->f_code[1][1]; - pic->nb_slices = priv->mb_height; - return 0; } @@ -490,8 +485,8 @@ static int vaapi_encode_mpeg2_init_slice_params(AVCodecContext *avctx, VAEncSliceParameterBufferMPEG2 *vslice = slice->codec_slice_params; int qp; - vslice->macroblock_address = priv->mb_width * slice->index; - vslice->num_macroblocks = priv->mb_width; + vslice->macroblock_address = slice->block_start; + vslice->num_macroblocks = slice->block_size; switch (pic->type) { case PICTURE_TYPE_IDR: @@ -525,9 +520,6 @@ static av_cold int vaapi_encode_mpeg2_configure(AVCodecContext *avctx) if (err < 0) return err; - priv->mb_width = FFALIGN(avctx->width, 16) / 16; - priv->mb_height = FFALIGN(avctx->height, 16) / 16; - if (ctx->va_rc_mode == VA_RC_CQP) { priv->quant_p = av_clip(avctx->global_quality, 1, 31); if (avctx->i_quant_factor > 0.0) @@ -553,6 +545,12 @@ static av_cold int vaapi_encode_mpeg2_configure(AVCodecContext *avctx) av_assert0(0 && "Invalid RC mode."); } + ctx->slice_block_rows = FFALIGN(avctx->height, 16) / 16; + ctx->slice_block_cols = FFALIGN(avctx->width, 16) / 16; + + ctx->nb_slices = ctx->slice_block_rows; + ctx->slice_size = 1; + return 0; } diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c index 1b2ec989cda47..167f06d7aebbf 100644 --- a/libavcodec/vdpau.c +++ b/libavcodec/vdpau.c @@ -208,8 +208,12 @@ int ff_vdpau_common_init(AVCodecContext *avctx, VdpDecoderProfile profile, return vdpau_error(status); if (avctx->codec_id == AV_CODEC_ID_HEVC && strncmp(info_string, "NVIDIA ", 7) == 0 && !(avctx->hwaccel_flags & AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH)) { - av_log(avctx, AV_LOG_VERBOSE, "HEVC with NVIDIA VDPAU drivers is buggy, skipping.\n"); - return AVERROR(ENOTSUP); + int driver_version = 0; + sscanf(info_string, "NVIDIA VDPAU Driver 
Shared Library %d", &driver_version); + if (driver_version < 410) { + av_log(avctx, AV_LOG_VERBOSE, "HEVC with NVIDIA VDPAU drivers is buggy, skipping.\n"); + return AVERROR(ENOTSUP); + } } status = vdctx->get_proc_address(vdctx->device, diff --git a/libavcodec/version.h b/libavcodec/version.h index 9098882f47afe..66de337d001ab 100644 --- a/libavcodec/version.h +++ b/libavcodec/version.h @@ -28,8 +28,8 @@ #include "libavutil/version.h" #define LIBAVCODEC_VERSION_MAJOR 58 -#define LIBAVCODEC_VERSION_MINOR 33 -#define LIBAVCODEC_VERSION_MICRO 102 +#define LIBAVCODEC_VERSION_MINOR 39 +#define LIBAVCODEC_VERSION_MICRO 100 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ LIBAVCODEC_VERSION_MINOR, \ diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c index 0e6da89abbcad..9df2fda49def1 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c @@ -223,6 +223,10 @@ typedef struct Vp3DecodeContext { * which of the fragments are coded */ int *coded_fragment_list[3]; + int *kf_coded_fragment_list; + int *nkf_coded_fragment_list; + int num_kf_coded_fragment[3]; + VLC dc_vlc[16]; VLC ac_vlc_1[16]; VLC ac_vlc_2[16]; @@ -271,7 +275,8 @@ static av_cold void free_tables(AVCodecContext *avctx) av_freep(&s->superblock_coding); av_freep(&s->all_fragments); - av_freep(&s->coded_fragment_list[0]); + av_freep(&s->nkf_coded_fragment_list); + av_freep(&s->kf_coded_fragment_list); av_freep(&s->dct_tokens_base); av_freep(&s->superblock_fragments); av_freep(&s->macroblock_coding); @@ -538,44 +543,65 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb) s->total_num_coded_frags = 0; memset(s->macroblock_coding, MODE_COPY, s->macroblock_count); + s->coded_fragment_list[0] = s->keyframe ? s->kf_coded_fragment_list + : s->nkf_coded_fragment_list; + for (plane = 0; plane < 3; plane++) { int sb_start = superblock_starts[plane]; int sb_end = sb_start + (plane ? 
s->c_superblock_count : s->y_superblock_count); int num_coded_frags = 0; - for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) { - if (s->keyframe == 0 && get_bits_left(gb) < plane0_num_coded_frags >> 2) { - return AVERROR_INVALIDDATA; - } - /* iterate through all 16 fragments in a superblock */ - for (j = 0; j < 16; j++) { - /* if the fragment is in bounds, check its coding status */ - current_fragment = s->superblock_fragments[i * 16 + j]; - if (current_fragment != -1) { - int coded = s->superblock_coding[i]; - - if (s->superblock_coding[i] == SB_PARTIALLY_CODED) { - /* fragment may or may not be coded; this is the case - * that cares about the fragment coding runs */ - if (current_run-- == 0) { - bit ^= 1; - current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2); + if (s->keyframe) { + if (s->num_kf_coded_fragment[plane] == -1) { + for (i = sb_start; i < sb_end; i++) { + /* iterate through all 16 fragments in a superblock */ + for (j = 0; j < 16; j++) { + /* if the fragment is in bounds, check its coding status */ + current_fragment = s->superblock_fragments[i * 16 + j]; + if (current_fragment != -1) { + s->coded_fragment_list[plane][num_coded_frags++] = + current_fragment; } - coded = bit; } + } + s->num_kf_coded_fragment[plane] = num_coded_frags; + } else + num_coded_frags = s->num_kf_coded_fragment[plane]; + } else { + for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) { + if (get_bits_left(gb) < plane0_num_coded_frags >> 2) { + return AVERROR_INVALIDDATA; + } + /* iterate through all 16 fragments in a superblock */ + for (j = 0; j < 16; j++) { + /* if the fragment is in bounds, check its coding status */ + current_fragment = s->superblock_fragments[i * 16 + j]; + if (current_fragment != -1) { + int coded = s->superblock_coding[i]; + + if (coded == SB_PARTIALLY_CODED) { + /* fragment may or may not be coded; this is the case + * that cares about the fragment coding runs */ + if (current_run-- == 0) { + bit ^= 1; + current_run = 
get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2); + } + coded = bit; + } - if (coded) { - /* default mode; actual mode will be decoded in - * the next phase */ - s->all_fragments[current_fragment].coding_method = - MODE_INTER_NO_MV; - s->coded_fragment_list[plane][num_coded_frags++] = - current_fragment; - } else { - /* not coded; copy this fragment from the prior frame */ - s->all_fragments[current_fragment].coding_method = - MODE_COPY; + if (coded) { + /* default mode; actual mode will be decoded in + * the next phase */ + s->all_fragments[current_fragment].coding_method = + MODE_INTER_NO_MV; + s->coded_fragment_list[plane][num_coded_frags++] = + current_fragment; + } else { + /* not coded; copy this fragment from the prior frame */ + s->all_fragments[current_fragment].coding_method = + MODE_COPY; + } } } } @@ -1691,7 +1717,9 @@ static av_cold int allocate_tables(AVCodecContext *avctx) s->superblock_coding = av_mallocz(s->superblock_count); s->all_fragments = av_mallocz_array(s->fragment_count, sizeof(Vp3Fragment)); - s->coded_fragment_list[0] = av_mallocz_array(s->fragment_count, sizeof(int)); + s-> kf_coded_fragment_list = av_mallocz_array(s->fragment_count, sizeof(int)); + s->nkf_coded_fragment_list = av_mallocz_array(s->fragment_count, sizeof(int)); + memset(s-> num_kf_coded_fragment, -1, sizeof(s-> num_kf_coded_fragment)); s->dct_tokens_base = av_mallocz_array(s->fragment_count, 64 * sizeof(*s->dct_tokens_base)); @@ -1703,7 +1731,8 @@ static av_cold int allocate_tables(AVCodecContext *avctx) s->macroblock_coding = av_mallocz(s->macroblock_count + 1); if (!s->superblock_coding || !s->all_fragments || - !s->dct_tokens_base || !s->coded_fragment_list[0] || + !s->dct_tokens_base || !s->kf_coded_fragment_list || + !s->nkf_coded_fragment_list || !s->superblock_fragments || !s->macroblock_coding || !s->motion_val[0] || !s->motion_val[1]) { vp3_decode_end(avctx); @@ -2257,6 +2286,8 @@ static int vp3_init_thread_copy(AVCodecContext *avctx) s->superblock_coding = 
NULL; s->all_fragments = NULL; s->coded_fragment_list[0] = NULL; + s-> kf_coded_fragment_list= NULL; + s->nkf_coded_fragment_list= NULL; s->dct_tokens_base = NULL; s->superblock_fragments = NULL; s->macroblock_coding = NULL; diff --git a/libavcodec/vp56.h b/libavcodec/vp56.h index b8dda9e73a549..70e1d38a83733 100644 --- a/libavcodec/vp56.h +++ b/libavcodec/vp56.h @@ -227,6 +227,14 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, extern const uint8_t ff_vp56_norm_shift[256]; int ff_vp56_init_range_decoder(VP56RangeCoder *c, const uint8_t *buf, int buf_size); +/** + * vp5689 returns 1 if the end of the stream has been reached, 0 otherwise. + */ +static av_always_inline int vpX_rac_is_end(VP56RangeCoder *c) +{ + return c->end <= c->buffer && c->bits >= 0; +} + static av_always_inline unsigned int vp56_rac_renorm(VP56RangeCoder *c) { int shift = ff_vp56_norm_shift[c->high]; diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c index b1178c9c0cb0c..acf3ffc9e7304 100644 --- a/libavcodec/vp9.c +++ b/libavcodec/vp9.c @@ -1306,6 +1306,9 @@ static int decode_tiles(AVCodecContext *avctx, decode_sb_mem(td, row, col, lflvl_ptr, yoff2, uvoff2, BL_64X64); } else { + if (vpX_rac_is_end(td->c)) { + return AVERROR_INVALIDDATA; + } decode_sb(td, row, col, lflvl_ptr, yoff2, uvoff2, BL_64X64); } diff --git a/libavcodec/vp9_parser.c b/libavcodec/vp9_parser.c index 9531f34a32531..c957a75667b57 100644 --- a/libavcodec/vp9_parser.c +++ b/libavcodec/vp9_parser.c @@ -36,12 +36,16 @@ static int parse(AVCodecParserContext *ctx, *out_data = data; *out_size = size; - if ((res = init_get_bits8(&gb, data, size)) < 0) + if (!size || (res = init_get_bits8(&gb, data, size)) < 0) return size; // parsers can't return errors get_bits(&gb, 2); // frame marker profile = get_bits1(&gb); profile |= get_bits1(&gb) << 1; if (profile == 3) profile += get_bits1(&gb); + if (profile > 3) + return size; + + avctx->profile = profile; if (get_bits1(&gb)) { keyframe = 0; diff --git 
a/libavdevice/decklink_dec.cpp b/libavdevice/decklink_dec.cpp index deb8f787ee66c..9de8fa0c9da1a 100644 --- a/libavdevice/decklink_dec.cpp +++ b/libavdevice/decklink_dec.cpp @@ -1167,14 +1167,14 @@ av_cold int ff_decklink_read_header(AVFormatContext *avctx) break; case bmdFormat8BitARGB: st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO; - st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format); st->codecpar->format = AV_PIX_FMT_0RGB; + st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format); st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num); break; case bmdFormat8BitBGRA: st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO; - st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format); st->codecpar->format = AV_PIX_FMT_BGR0; + st->codecpar->codec_tag = avcodec_pix_fmt_to_codec_tag((enum AVPixelFormat)st->codecpar->format); st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 32, st->time_base.den, st->time_base.num); break; case bmdFormat10BitRGB: diff --git a/libavdevice/libndi_newtek_dec.c b/libavdevice/libndi_newtek_dec.c index 4fb719770e57c..d2d5648c4bbac 100644 --- a/libavdevice/libndi_newtek_dec.c +++ b/libavdevice/libndi_newtek_dec.c @@ -33,6 +33,7 @@ struct NDIContext { int find_sources; int64_t wait_sources; int allow_video_fields; + char *extra_ips; /* Runtime */ NDIlib_recv_create_t *recv; @@ -99,7 +100,7 @@ static int ndi_find_sources(AVFormatContext *avctx, const char *name, NDIlib_sou struct NDIContext *ctx = avctx->priv_data; const NDIlib_source_t *ndi_srcs = NULL; const NDIlib_find_create_t find_create_desc = { .show_local_sources = true, - .p_groups = NULL, .p_extra_ips = NULL }; + .p_groups = NULL, .p_extra_ips = ctx->extra_ips }; if (!ctx->ndi_find) ctx->ndi_find = NDIlib_find_create2(&find_create_desc); @@ -317,6 +318,7 @@ static const AVOption options[] = { 
{ "find_sources", "Find available sources" , OFFSET(find_sources), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DEC }, { "wait_sources", "Time to wait until the number of online sources have changed" , OFFSET(wait_sources), AV_OPT_TYPE_DURATION, { .i64 = 1000000 }, 100000, 20000000, DEC }, { "allow_video_fields", "When this flag is FALSE, all video that you receive will be progressive" , OFFSET(allow_video_fields), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, DEC }, + { "extra_ips", "List of comma separated ip addresses to scan for remote sources", OFFSET(extra_ips), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC }, { NULL }, }; diff --git a/libavdevice/sdl2.c b/libavdevice/sdl2.c index da5143078eef8..d6fc74a66c765 100644 --- a/libavdevice/sdl2.c +++ b/libavdevice/sdl2.c @@ -40,6 +40,7 @@ typedef struct { SDL_Renderer *renderer; char *window_title; int window_width, window_height; /**< size of the window */ + int window_x, window_y; /**< position of the window */ int window_fullscreen; int window_borderless; int enable_quit_action; @@ -155,8 +156,6 @@ static int sdl2_write_trailer(AVFormatContext *s) return 0; } -#define SDL_BASE_FLAGS (SDL_SWSURFACE|SDL_WINDOW_RESIZABLE) - static int sdl2_write_header(AVFormatContext *s) { SDLContext *sdl = s->priv_data; @@ -196,8 +195,9 @@ static int sdl2_write_header(AVFormatContext *s) } /* resize texture to width and height from the codec context information */ - flags = SDL_BASE_FLAGS | (sdl->window_fullscreen ? SDL_WINDOW_FULLSCREEN : 0) | - (sdl->window_borderless ? SDL_WINDOW_BORDERLESS : 0); + flags = SDL_WINDOW_HIDDEN | + (sdl->window_fullscreen ? SDL_WINDOW_FULLSCREEN : 0) | + (sdl->window_borderless ? 
SDL_WINDOW_BORDERLESS : SDL_WINDOW_RESIZABLE); /* initialization */ if (!sdl->inited){ @@ -216,6 +216,8 @@ static int sdl2_write_header(AVFormatContext *s) } SDL_SetWindowTitle(sdl->window, sdl->window_title); + SDL_SetWindowPosition(sdl->window, sdl->window_x, sdl->window_y); + SDL_ShowWindow(sdl->window); sdl->texture = SDL_CreateTexture(sdl->renderer, sdl->texture_fmt, SDL_TEXTUREACCESS_STREAMING, codecpar->width, codecpar->height); @@ -337,6 +339,8 @@ static int sdl2_write_packet(AVFormatContext *s, AVPacket *pkt) static const AVOption options[] = { { "window_title", "set SDL window title", OFFSET(window_title), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, { "window_size", "set SDL window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, + { "window_x", "set SDL window x position", OFFSET(window_x), AV_OPT_TYPE_INT, { .i64 = SDL_WINDOWPOS_CENTERED }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, + { "window_y", "set SDL window y position", OFFSET(window_y), AV_OPT_TYPE_INT, { .i64 = SDL_WINDOWPOS_CENTERED }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, { "window_fullscreen", "set SDL window fullscreen", OFFSET(window_fullscreen), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, AV_OPT_FLAG_ENCODING_PARAM }, { "window_borderless", "set SDL window border off", OFFSET(window_borderless), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, AV_OPT_FLAG_ENCODING_PARAM }, { "window_enable_quit", "set if quit action is available", OFFSET(enable_quit_action), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM }, diff --git a/libavdevice/version.h b/libavdevice/version.h index e6ee009cc4554..edf26bdc1a853 100644 --- a/libavdevice/version.h +++ b/libavdevice/version.h @@ -28,8 +28,8 @@ #include "libavutil/version.h" #define LIBAVDEVICE_VERSION_MAJOR 58 -#define LIBAVDEVICE_VERSION_MINOR 4 -#define LIBAVDEVICE_VERSION_MICRO 105 +#define LIBAVDEVICE_VERSION_MINOR 6 +#define 
LIBAVDEVICE_VERSION_MICRO 100 #define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \ LIBAVDEVICE_VERSION_MINOR, \ diff --git a/libavfilter/Makefile b/libavfilter/Makefile index b03b2457ebc4a..79a89a1ab1a98 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -166,6 +166,7 @@ OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o boxblur.o OBJS-$(CONFIG_BOXBLUR_OPENCL_FILTER) += vf_avgblur_opencl.o opencl.o \ opencl/avgblur.o boxblur.o OBJS-$(CONFIG_BWDIF_FILTER) += vf_bwdif.o +OBJS-$(CONFIG_CHROMAHOLD_FILTER) += vf_chromakey.o OBJS-$(CONFIG_CHROMAKEY_FILTER) += vf_chromakey.o OBJS-$(CONFIG_CIESCOPE_FILTER) += vf_ciescope.o OBJS-$(CONFIG_CODECVIEW_FILTER) += vf_codecview.o @@ -239,6 +240,7 @@ OBJS-$(CONFIG_FSPP_FILTER) += vf_fspp.o OBJS-$(CONFIG_GBLUR_FILTER) += vf_gblur.o OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o +OBJS-$(CONFIG_GRAPHMONITOR_FILTER) += f_graphmonitor.o OBJS-$(CONFIG_GREYEDGE_FILTER) += vf_colorconstancy.o OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o framesync.o OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o @@ -341,7 +343,8 @@ OBJS-$(CONFIG_SELECTIVECOLOR_FILTER) += vf_selectivecolor.o OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o -OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setfield.o +OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setparams.o +OBJS-$(CONFIG_SETPARAMS_FILTER) += vf_setparams.o OBJS-$(CONFIG_SETPTS_FILTER) += setpts.o OBJS-$(CONFIG_SETRANGE_FILTER) += vf_setparams.o OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o @@ -380,6 +383,7 @@ OBJS-$(CONFIG_TMIX_FILTER) += vf_mix.o framesync.o OBJS-$(CONFIG_TONEMAP_FILTER) += vf_tonemap.o colorspace.o OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER) += vf_tonemap_opencl.o colorspace.o opencl.o \ opencl/tonemap.o opencl/colorspace_common.o +OBJS-$(CONFIG_TPAD_FILTER) += vf_tpad.o OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER) += 
vf_transpose_npp.o OBJS-$(CONFIG_TRIM_FILTER) += trim.o @@ -392,6 +396,7 @@ OBJS-$(CONFIG_VAGUEDENOISER_FILTER) += vf_vaguedenoiser.o OBJS-$(CONFIG_VECTORSCOPE_FILTER) += vf_vectorscope.o OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o OBJS-$(CONFIG_VFRDET_FILTER) += vf_vfrdet.o +OBJS-$(CONFIG_VIBRANCE_FILTER) += vf_vibrance.o OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o @@ -402,7 +407,9 @@ OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o OBJS-$(CONFIG_WAVEFORM_FILTER) += vf_waveform.o OBJS-$(CONFIG_WEAVE_FILTER) += vf_weave.o OBJS-$(CONFIG_XBR_FILTER) += vf_xbr.o -OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o +OBJS-$(CONFIG_XSTACK_FILTER) += vf_stack.o framesync.o +OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o yadif_common.o +OBJS-$(CONFIG_YADIF_CUDA_FILTER) += vf_yadif_cuda.o vf_yadif_cuda.ptx.o yadif_common.o OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o OBJS-$(CONFIG_ZOOMPAN_FILTER) += vf_zoompan.o OBJS-$(CONFIG_ZSCALE_FILTER) += vf_zscale.o @@ -433,6 +440,7 @@ OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o # multimedia filters OBJS-$(CONFIG_ABITSCOPE_FILTER) += avf_abitscope.o OBJS-$(CONFIG_ADRAWGRAPH_FILTER) += f_drawgraph.o +OBJS-$(CONFIG_AGRAPHMONITOR_FILTER) += f_graphmonitor.o OBJS-$(CONFIG_AHISTOGRAM_FILTER) += avf_ahistogram.o OBJS-$(CONFIG_APHASEMETER_FILTER) += avf_aphasemeter.o OBJS-$(CONFIG_AVECTORSCOPE_FILTER) += avf_avectorscope.o diff --git a/libavfilter/af_adelay.c b/libavfilter/af_adelay.c index 46c3d51040fb9..98c7ca5a323fd 100644 --- a/libavfilter/af_adelay.c +++ b/libavfilter/af_adelay.c @@ -23,6 +23,7 @@ #include "libavutil/samplefmt.h" #include "avfilter.h" #include "audio.h" +#include "filters.h" #include "internal.h" typedef struct ChanDelay { @@ -38,8 +39,10 @@ typedef struct AudioDelayContext { ChanDelay *chandelay; int nb_delays; int block_align; - unsigned max_delay; + int64_t padding; + int64_t max_delay; 
int64_t next_pts; + int eof; void (*delay_channel)(ChanDelay *d, int nb_samples, const uint8_t *src, uint8_t *dst); @@ -158,6 +161,21 @@ static int config_input(AVFilterLink *inlink) } } + s->padding = s->chandelay[0].delay; + for (i = 1; i < s->nb_delays; i++) { + ChanDelay *d = &s->chandelay[i]; + + s->padding = FFMIN(s->padding, d->delay); + } + + if (s->padding) { + for (i = 0; i < s->nb_delays; i++) { + ChanDelay *d = &s->chandelay[i]; + + d->delay -= s->padding; + } + } + for (i = 0; i < s->nb_delays; i++) { ChanDelay *d = &s->chandelay[i]; @@ -210,26 +228,30 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) s->delay_channel(d, frame->nb_samples, src, dst); } - s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base); + out_frame->pts = s->next_pts; + s->next_pts += av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base); av_frame_free(&frame); return ff_filter_frame(ctx->outputs[0], out_frame); } -static int request_frame(AVFilterLink *outlink) +static int activate(AVFilterContext *ctx) { - AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; AudioDelayContext *s = ctx->priv; - int ret; + AVFrame *frame = NULL; + int ret, status; + int64_t pts; - ret = ff_request_frame(ctx->inputs[0]); - if (ret == AVERROR_EOF && !ctx->is_disabled && s->max_delay) { - int nb_samples = FFMIN(s->max_delay, 2048); - AVFrame *frame; + FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink); + + if (s->padding) { + int nb_samples = FFMIN(s->padding, 2048); frame = ff_get_audio_buffer(outlink, nb_samples); if (!frame) return AVERROR(ENOMEM); - s->max_delay -= nb_samples; + s->padding -= nb_samples; av_samples_set_silence(frame->extended_data, 0, frame->nb_samples, @@ -240,10 +262,47 @@ static int request_frame(AVFilterLink *outlink) if (s->next_pts != AV_NOPTS_VALUE) s->next_pts += av_rescale_q(nb_samples, 
(AVRational){1, outlink->sample_rate}, outlink->time_base); - ret = filter_frame(ctx->inputs[0], frame); + return ff_filter_frame(outlink, frame); + } + + ret = ff_inlink_consume_frame(inlink, &frame); + if (ret < 0) + return ret; + + if (ret > 0) + return filter_frame(inlink, frame); + + if (ff_inlink_acknowledge_status(inlink, &status, &pts)) { + if (status == AVERROR_EOF) + s->eof = 1; } - return ret; + if (s->eof && s->max_delay) { + int nb_samples = FFMIN(s->max_delay, 2048); + + frame = ff_get_audio_buffer(outlink, nb_samples); + if (!frame) + return AVERROR(ENOMEM); + s->max_delay -= nb_samples; + + av_samples_set_silence(frame->extended_data, 0, + frame->nb_samples, + outlink->channels, + frame->format); + + frame->pts = s->next_pts; + return filter_frame(inlink, frame); + } + + if (s->eof && s->max_delay == 0) { + ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts); + return 0; + } + + if (!s->eof) + FF_FILTER_FORWARD_WANTED(outlink, inlink); + + return FFERROR_NOT_READY; } static av_cold void uninit(AVFilterContext *ctx) @@ -262,16 +321,14 @@ static const AVFilterPad adelay_inputs[] = { .name = "default", .type = AVMEDIA_TYPE_AUDIO, .config_props = config_input, - .filter_frame = filter_frame, }, { NULL } }; static const AVFilterPad adelay_outputs[] = { { - .name = "default", - .request_frame = request_frame, - .type = AVMEDIA_TYPE_AUDIO, + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, }, { NULL } }; @@ -282,6 +339,7 @@ AVFilter ff_af_adelay = { .query_formats = query_formats, .priv_size = sizeof(AudioDelayContext), .priv_class = &adelay_class, + .activate = activate, .uninit = uninit, .inputs = adelay_inputs, .outputs = adelay_outputs, diff --git a/libavfilter/af_afade.c b/libavfilter/af_afade.c index 9aab644039cdc..8c7678107a6fa 100644 --- a/libavfilter/af_afade.c +++ b/libavfilter/af_afade.c @@ -23,10 +23,6 @@ * fade audio filter */ -#define FF_INTERNAL_FIELDS 1 -#include "framequeue.h" - -#include "libavutil/audio_fifo.h" #include 
"libavutil/opt.h" #include "audio.h" #include "avfilter.h" @@ -43,9 +39,7 @@ typedef struct AudioFadeContext { int64_t start_time; int overlap; int cf0_eof; - int prev_size; int crossfade_is_over; - AVAudioFifo *fifo[2]; int64_t pts; void (*fade_samples)(uint8_t **dst, uint8_t * const *src, @@ -472,8 +466,8 @@ static int activate(AVFilterContext *ctx) return ff_filter_frame(outlink, in); } - if (ff_framequeue_queued_samples(&ctx->inputs[0]->fifo) > s->nb_samples) { - nb_samples = ff_framequeue_queued_samples(&ctx->inputs[0]->fifo) - s->nb_samples; + if (ff_inlink_queued_samples(ctx->inputs[0]) > s->nb_samples) { + nb_samples = ff_inlink_queued_samples(ctx->inputs[0]) - s->nb_samples; if (nb_samples > 0) { ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in); if (ret < 0) { @@ -484,7 +478,7 @@ static int activate(AVFilterContext *ctx) s->pts += av_rescale_q(in->nb_samples, (AVRational){ 1, outlink->sample_rate }, outlink->time_base); return ff_filter_frame(outlink, in); - } else if (ff_framequeue_queued_samples(&ctx->inputs[1]->fifo) >= s->nb_samples) { + } else if (ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples) { if (s->overlap) { out = ff_get_audio_buffer(outlink, s->nb_samples); if (!out) @@ -554,10 +548,10 @@ static int activate(AVFilterContext *ctx) return ff_filter_frame(outlink, out); } } else if (ff_outlink_frame_wanted(ctx->outputs[0])) { - if (!s->cf0_eof && ctx->inputs[0]->status_in) { + if (!s->cf0_eof && ff_outlink_get_status(ctx->inputs[0])) { s->cf0_eof = 1; } - if (ctx->inputs[1]->status_in) { + if (ff_outlink_get_status(ctx->inputs[1])) { ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE); return 0; } diff --git a/libavfilter/af_afftdn.c b/libavfilter/af_afftdn.c index fbcb0f18d5e56..ed2c1c1defbe8 100644 --- a/libavfilter/af_afftdn.c +++ b/libavfilter/af_afftdn.c @@ -28,6 +28,7 @@ #include "avfilter.h" #include "audio.h" #include "formats.h" +#include "filters.h" #define C (M_LN10 * 0.1) #define 
RATIO 0.98 @@ -1153,7 +1154,7 @@ static void get_auto_noise_levels(AudioFFTDeNoiseContext *s, } } -static int filter_frame(AVFilterLink *inlink, AVFrame *frame) +static int output_frame(AVFilterLink *inlink) { AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; @@ -1162,117 +1163,145 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) ThreadData td; int ret = 0; - if (s->pts == AV_NOPTS_VALUE) - s->pts = frame->pts; + in = ff_get_audio_buffer(outlink, s->window_length); + if (!in) + return AVERROR(ENOMEM); - ret = av_audio_fifo_write(s->fifo, (void **)frame->extended_data, frame->nb_samples); - av_frame_free(&frame); + ret = av_audio_fifo_peek(s->fifo, (void **)in->extended_data, s->window_length); if (ret < 0) - return ret; + goto end; - while (av_audio_fifo_size(s->fifo) >= s->window_length) { - if (!in) { - in = ff_get_audio_buffer(outlink, s->window_length); - if (!in) - return AVERROR(ENOMEM); - } + if (s->track_noise) { + for (int ch = 0; ch < inlink->channels; ch++) { + DeNoiseChannel *dnch = &s->dnch[ch]; + double levels[15]; - ret = av_audio_fifo_peek(s->fifo, (void **)in->extended_data, s->window_length); - if (ret < 0) - break; + get_auto_noise_levels(s, dnch, levels); + set_noise_profile(s, dnch, levels, 0); + } - if (s->track_noise) { - for (int ch = 0; ch < inlink->channels; ch++) { - DeNoiseChannel *dnch = &s->dnch[ch]; - double levels[15]; + if (s->noise_floor != s->last_noise_floor) + set_parameters(s); + } - get_auto_noise_levels(s, dnch, levels); - set_noise_profile(s, dnch, levels, 0); - } + if (s->sample_noise_start) { + for (int ch = 0; ch < inlink->channels; ch++) { + DeNoiseChannel *dnch = &s->dnch[ch]; - if (s->noise_floor != s->last_noise_floor) - set_parameters(s); + init_sample_noise(dnch); } + s->sample_noise_start = 0; + s->sample_noise = 1; + } - if (s->sample_noise_start) { - for (int ch = 0; ch < inlink->channels; ch++) { - DeNoiseChannel *dnch = &s->dnch[ch]; + if (s->sample_noise) { + for (int 
ch = 0; ch < inlink->channels; ch++) { + DeNoiseChannel *dnch = &s->dnch[ch]; - init_sample_noise(dnch); - } - s->sample_noise_start = 0; - s->sample_noise = 1; + sample_noise_block(s, dnch, in, ch); } + } - if (s->sample_noise) { - for (int ch = 0; ch < inlink->channels; ch++) { - DeNoiseChannel *dnch = &s->dnch[ch]; + if (s->sample_noise_end) { + for (int ch = 0; ch < inlink->channels; ch++) { + DeNoiseChannel *dnch = &s->dnch[ch]; + double sample_noise[15]; - sample_noise_block(s, dnch, in, ch); - } + finish_sample_noise(s, dnch, sample_noise); + set_noise_profile(s, dnch, sample_noise, 1); + set_band_parameters(s, dnch); } + s->sample_noise = 0; + s->sample_noise_end = 0; + } - if (s->sample_noise_end) { - for (int ch = 0; ch < inlink->channels; ch++) { - DeNoiseChannel *dnch = &s->dnch[ch]; - double sample_noise[15]; + s->block_count++; + td.in = in; + ctx->internal->execute(ctx, filter_channel, &td, NULL, + FFMIN(outlink->channels, ff_filter_get_nb_threads(ctx))); - finish_sample_noise(s, dnch, sample_noise); - set_noise_profile(s, dnch, sample_noise, 1); - set_band_parameters(s, dnch); - } - s->sample_noise = 0; - s->sample_noise_end = 0; - } - - s->block_count++; - td.in = in; - ctx->internal->execute(ctx, filter_channel, &td, NULL, - FFMIN(outlink->channels, ff_filter_get_nb_threads(ctx))); + out = ff_get_audio_buffer(outlink, s->sample_advance); + if (!out) { + ret = AVERROR(ENOMEM); + goto end; + } - out = ff_get_audio_buffer(outlink, s->sample_advance); - if (!out) { - ret = AVERROR(ENOMEM); + for (int ch = 0; ch < inlink->channels; ch++) { + DeNoiseChannel *dnch = &s->dnch[ch]; + double *src = dnch->out_samples; + float *orig = (float *)in->extended_data[ch]; + float *dst = (float *)out->extended_data[ch]; + + switch (s->output_mode) { + case IN_MODE: + for (int m = 0; m < s->sample_advance; m++) + dst[m] = orig[m]; + break; + case OUT_MODE: + for (int m = 0; m < s->sample_advance; m++) + dst[m] = src[m]; break; + case NOISE_MODE: + for (int m = 0; m < 
s->sample_advance; m++) + dst[m] = orig[m] - src[m]; + break; + default: + av_frame_free(&out); + ret = AVERROR_BUG; + goto end; } + memmove(src, src + s->sample_advance, (s->window_length - s->sample_advance) * sizeof(*src)); + memset(src + (s->window_length - s->sample_advance), 0, s->sample_advance * sizeof(*src)); + } - for (int ch = 0; ch < inlink->channels; ch++) { - DeNoiseChannel *dnch = &s->dnch[ch]; - double *src = dnch->out_samples; - float *orig = (float *)in->extended_data[ch]; - float *dst = (float *)out->extended_data[ch]; - - switch (s->output_mode) { - case IN_MODE: - for (int m = 0; m < s->sample_advance; m++) - dst[m] = orig[m]; - break; - case OUT_MODE: - for (int m = 0; m < s->sample_advance; m++) - dst[m] = src[m]; - break; - case NOISE_MODE: - for (int m = 0; m < s->sample_advance; m++) - dst[m] = orig[m] - src[m]; - break; - default: - return AVERROR_BUG; - } - memmove(src, src + s->sample_advance, (s->window_length - s->sample_advance) * sizeof(*src)); - memset(src + (s->window_length - s->sample_advance), 0, s->sample_advance * sizeof(*src)); - } + av_audio_fifo_drain(s->fifo, s->sample_advance); + + out->pts = s->pts; + ret = ff_filter_frame(outlink, out); + if (ret < 0) + goto end; + s->pts += s->sample_advance; +end: + av_frame_free(&in); - av_audio_fifo_drain(s->fifo, s->sample_advance); + return ret; +} - out->pts = s->pts; - ret = ff_filter_frame(outlink, out); +static int activate(AVFilterContext *ctx) +{ + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + AudioFFTDeNoiseContext *s = ctx->priv; + AVFrame *frame = NULL; + int ret; + + FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink); + + ret = ff_inlink_consume_frame(inlink, &frame); + if (ret < 0) + return ret; + + if (ret > 0) { + if (s->pts == AV_NOPTS_VALUE) + s->pts = frame->pts; + + ret = av_audio_fifo_write(s->fifo, (void **)frame->extended_data, frame->nb_samples); + av_frame_free(&frame); if (ret < 0) - break; - s->pts += s->sample_advance; + 
return ret; } - av_frame_free(&in); - return ret; + if (av_audio_fifo_size(s->fifo) >= s->window_length) + return output_frame(inlink); + + FF_FILTER_FORWARD_STATUS(inlink, outlink); + if (ff_outlink_frame_wanted(outlink) && + av_audio_fifo_size(s->fifo) < s->window_length) { + ff_inlink_request_frame(inlink); + return 0; + } + + return FFERROR_NOT_READY; } static av_cold void uninit(AVFilterContext *ctx) @@ -1352,7 +1381,8 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar if (!strcmp(args, "start")) { s->sample_noise_start = 1; s->sample_noise_end = 0; - } else if (!strcmp(args, "end")) { + } else if (!strcmp(args, "end") || + !strcmp(args, "stop")) { s->sample_noise_start = 0; s->sample_noise_end = 1; } @@ -1393,7 +1423,6 @@ static const AVFilterPad inputs[] = { { .name = "default", .type = AVMEDIA_TYPE_AUDIO, - .filter_frame = filter_frame, .config_props = config_input, }, { NULL } @@ -1413,6 +1442,7 @@ AVFilter ff_af_afftdn = { .query_formats = query_formats, .priv_size = sizeof(AudioFFTDeNoiseContext), .priv_class = &afftdn_class, + .activate = activate, .uninit = uninit, .inputs = inputs, .outputs = outputs, diff --git a/libavfilter/af_afir.c b/libavfilter/af_afir.c index 244da3ab4cff3..36fde60594f20 100644 --- a/libavfilter/af_afir.c +++ b/libavfilter/af_afir.c @@ -120,14 +120,13 @@ static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink) { AVFilterContext *ctx = outlink->src; AVFrame *out = NULL; - int ret; s->nb_samples = in->nb_samples; - if (!s->want_skip) { - out = ff_get_audio_buffer(outlink, s->nb_samples); - if (!out) - return AVERROR(ENOMEM); + out = ff_get_audio_buffer(outlink, s->nb_samples); + if (!out) { + av_frame_free(&in); + return AVERROR(ENOMEM); } if (s->pts == AV_NOPTS_VALUE) @@ -137,26 +136,18 @@ static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink) s->part_index = (s->part_index + 1) % s->nb_partitions; - if (!s->want_skip) { - out->pts = s->pts; - if (s->pts 
!= AV_NOPTS_VALUE) - s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base); - } + out->pts = s->pts; + if (s->pts != AV_NOPTS_VALUE) + s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base); s->index++; if (s->index == 3) s->index = 0; av_frame_free(&in); + s->in[0] = NULL; - if (s->want_skip == 1) { - s->want_skip = 0; - ret = 0; - } else { - ret = ff_filter_frame(outlink, out); - } - - return ret; + return ff_filter_frame(outlink, out); } static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color) @@ -211,8 +202,9 @@ static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t col static void draw_response(AVFilterContext *ctx, AVFrame *out) { AudioFIRContext *s = ctx->priv; - float *mag, *phase, min = FLT_MAX, max = FLT_MIN; - int prev_ymag = -1, prev_yphase = -1; + float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN; + float min_delay = FLT_MAX, max_delay = FLT_MIN; + int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1; char text[32]; int channel, i, x; @@ -220,44 +212,56 @@ static void draw_response(AVFilterContext *ctx, AVFrame *out) phase = av_malloc_array(s->w, sizeof(*phase)); mag = av_malloc_array(s->w, sizeof(*mag)); - if (!mag || !phase) + delay = av_malloc_array(s->w, sizeof(*delay)); + if (!mag || !phase || !delay) goto end; channel = av_clip(s->ir_channel, 0, s->in[1]->channels - 1); for (i = 0; i < s->w; i++) { const float *src = (const float *)s->in[1]->extended_data[channel]; double w = i * M_PI / (s->w - 1); - double real = 0.; - double imag = 0.; + double div, real_num = 0., imag_num = 0., real = 0., imag = 0.; for (x = 0; x < s->nb_taps; x++) { real += cos(-x * w) * src[x]; imag += sin(-x * w) * src[x]; + real_num += cos(-x * w) * src[x] * x; + imag_num += sin(-x * w) * src[x] * x; } mag[i] = hypot(real, imag); phase[i] = atan2(imag, real); + div = real * real + imag * imag; + delay[i] = (real_num * real + 
imag_num * imag) / div; min = fminf(min, mag[i]); max = fmaxf(max, mag[i]); + min_delay = fminf(min_delay, delay[i]); + max_delay = fmaxf(max_delay, delay[i]); } for (i = 0; i < s->w; i++) { int ymag = mag[i] / max * (s->h - 1); + int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1); int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1); ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1); yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1); + ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1); if (prev_ymag < 0) prev_ymag = ymag; if (prev_yphase < 0) prev_yphase = yphase; + if (prev_ydelay < 0) + prev_ydelay = ydelay; draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF); draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00); + draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF); prev_ymag = ymag; prev_yphase = yphase; + prev_ydelay = ydelay; } if (s->w > 400 && s->h > 100) { @@ -268,9 +272,18 @@ static void draw_response(AVFilterContext *ctx, AVFrame *out) drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD); snprintf(text, sizeof(text), "%.2f", min); drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD); + + drawtext(out, 2, 22, "Max Delay:", 0xDDDDDDDD); + snprintf(text, sizeof(text), "%.2f", max_delay); + drawtext(out, 11 * 8 + 2, 22, text, 0xDDDDDDDD); + + drawtext(out, 2, 32, "Min Delay:", 0xDDDDDDDD); + snprintf(text, sizeof(text), "%.2f", min_delay); + drawtext(out, 11 * 8 + 2, 32, text, 0xDDDDDDDD); } end: + av_free(delay); av_free(phase); av_free(mag); } @@ -285,8 +298,8 @@ static int convert_coeffs(AVFilterContext *ctx) if (s->nb_taps <= 0) return AVERROR(EINVAL); - for (n = 4; (1 << n) < s->nb_taps; n++); - N = FFMIN(n, 16); + for (n = av_log2(s->minp); (1 << n) < s->nb_taps; n++); + N = FFMIN(n, av_log2(s->maxp)); s->ir_length = 1 << n; s->fft_length = (1 << (N + 1)) + 1; s->part_size = 1 << (N - 1); @@ -337,7 +350,7 @@ static int convert_coeffs(AVFilterContext *ctx) switch (s->gtype) { case -1: - /* 
nothinkg to do */ + /* nothing to do */ break; case 0: for (ch = 0; ch < ctx->inputs[1]->channels; ch++) { @@ -461,6 +474,8 @@ static int activate(AVFilterContext *ctx) if (!s->eof_coeffs) { if (ff_outlink_frame_wanted(ctx->outputs[0])) ff_inlink_request_frame(ctx->inputs[1]); + else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1])) + ff_inlink_request_frame(ctx->inputs[1]); return 0; } } @@ -471,34 +486,28 @@ static int activate(AVFilterContext *ctx) return ret; } - if (s->need_padding) { - in = ff_get_audio_buffer(outlink, s->part_size); - if (!in) - return AVERROR(ENOMEM); - s->need_padding = 0; - ret = 1; - } else { - ret = ff_inlink_consume_samples(ctx->inputs[0], s->part_size, s->part_size, &in); - } - - if (ret > 0) { + ret = ff_inlink_consume_samples(ctx->inputs[0], s->part_size, s->part_size, &in); + if (ret > 0) ret = fir_frame(s, in, outlink); - if (ret < 0) - return ret; - } if (ret < 0) return ret; if (s->response && s->have_coeffs) { - if (ff_outlink_frame_wanted(ctx->outputs[1])) { - s->video->pts = s->pts; - ret = ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video)); - if (ret < 0) - return ret; + int64_t old_pts = s->video->pts; + int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base); + + if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) { + s->video->pts = new_pts; + return ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video)); } } + if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->part_size) { + ff_filter_set_ready(ctx, 10); + return 0; + } + if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) { if (status == AVERROR_EOF) { ff_outlink_set_status(ctx->outputs[0], status, pts); @@ -508,17 +517,20 @@ static int activate(AVFilterContext *ctx) } } - if (ff_outlink_frame_wanted(ctx->outputs[0])) { + if (ff_outlink_frame_wanted(ctx->outputs[0]) && + !ff_outlink_get_status(ctx->inputs[0])) { ff_inlink_request_frame(ctx->inputs[0]); return 0; } - if (s->response 
&& ff_outlink_frame_wanted(ctx->outputs[1])) { + if (s->response && + ff_outlink_frame_wanted(ctx->outputs[1]) && + !ff_outlink_get_status(ctx->inputs[0])) { ff_inlink_request_frame(ctx->inputs[0]); return 0; } - return 0; + return FFERROR_NOT_READY; } static int query_formats(AVFilterContext *ctx) @@ -595,8 +607,6 @@ static int config_output(AVFilterLink *outlink) s->nb_channels = outlink->channels; s->nb_coef_channels = ctx->inputs[1]->channels; - s->want_skip = 1; - s->need_padding = 1; s->pts = AV_NOPTS_VALUE; return 0; @@ -660,6 +670,8 @@ static int config_video(AVFilterLink *outlink) outlink->sample_aspect_ratio = (AVRational){1,1}; outlink->w = s->w; outlink->h = s->h; + outlink->frame_rate = s->frame_rate; + outlink->time_base = av_inv_q(outlink->frame_rate); av_frame_free(&s->video); s->video = ff_get_video_buffer(outlink, outlink->w, outlink->h); @@ -752,6 +764,9 @@ static const AVOption afir_options[] = { { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF }, { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF }, { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF }, + { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF }, + { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=16}, 16, 65536, AF }, + { "maxp", "set max partition size", OFFSET(maxp), AV_OPT_TYPE_INT, {.i64=65536}, 16, 65536, AF }, { NULL } }; diff --git a/libavfilter/af_afir.h b/libavfilter/af_afir.h index 7d4f32eaebc99..13f7c98d72104 100644 --- a/libavfilter/af_afir.h +++ b/libavfilter/af_afir.h @@ -44,7 +44,10 @@ typedef struct AudioFIRContext { float max_ir_len; int response; int w, h; + AVRational frame_rate; int ir_channel; + int minp; + int maxp; float gain; @@ -63,8 +66,6 @@ typedef struct AudioFIRContext { int nb_coef_channels; int one2many; int nb_samples; - 
int want_skip; - int need_padding; RDFTContext **rdft, **irdft; float **sum; diff --git a/libavfilter/af_aiir.c b/libavfilter/af_aiir.c index 845d542d29efb..20dea98cbbd75 100644 --- a/libavfilter/af_aiir.c +++ b/libavfilter/af_aiir.c @@ -63,6 +63,7 @@ typedef struct AudioIIRContext { int response; int w, h; int ir_channel; + AVRational rate; AVFrame *video; @@ -939,11 +940,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) if (s->response) { AVFilterLink *outlink = ctx->outputs[1]; + int64_t old_pts = s->video->pts; + int64_t new_pts = av_rescale_q(out->pts, ctx->inputs[0]->time_base, outlink->time_base); - s->video->pts = out->pts; - ret = ff_filter_frame(outlink, av_frame_clone(s->video)); - if (ret < 0) - return ret; + if (new_pts > old_pts) { + s->video->pts = new_pts; + ret = ff_filter_frame(outlink, av_frame_clone(s->video)); + if (ret < 0) + return ret; + } } return ff_filter_frame(outlink, out); @@ -957,6 +962,8 @@ static int config_video(AVFilterLink *outlink) outlink->sample_aspect_ratio = (AVRational){1,1}; outlink->w = s->w; outlink->h = s->h; + outlink->frame_rate = s->rate; + outlink->time_base = av_inv_q(outlink->frame_rate); return 0; } @@ -1070,6 +1077,7 @@ static const AVOption aiir_options[] = { { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF }, { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF }, { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF }, + { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF }, { NULL }, }; diff --git a/libavfilter/af_amerge.c b/libavfilter/af_amerge.c index 3961c90701d86..567f25982d8ff 100644 --- a/libavfilter/af_amerge.c +++ b/libavfilter/af_amerge.c @@ -23,9 +23,6 @@ * Audio merging filter */ -#define FF_INTERNAL_FIELDS 1 -#include "framequeue.h" - #include "libavutil/avstring.h" #include 
"libavutil/bprint.h" #include "libavutil/channel_layout.h" @@ -285,9 +282,9 @@ static int activate(AVFilterContext *ctx) FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx); - nb_samples = ff_framequeue_queued_samples(&ctx->inputs[0]->fifo); + nb_samples = ff_inlink_queued_samples(ctx->inputs[0]); for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) { - nb_samples = FFMIN(ff_framequeue_queued_samples(&ctx->inputs[i]->fifo), nb_samples); + nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[i]), nb_samples); } if (nb_samples) { @@ -297,7 +294,7 @@ static int activate(AVFilterContext *ctx) } for (i = 0; i < ctx->nb_inputs; i++) { - if (ff_framequeue_queued_samples(&ctx->inputs[i]->fifo)) + if (ff_inlink_queued_samples(ctx->inputs[i])) continue; if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) { diff --git a/libavfilter/af_amultiply.c b/libavfilter/af_amultiply.c index a742f6a9c6f98..b35eca725002a 100644 --- a/libavfilter/af_amultiply.c +++ b/libavfilter/af_amultiply.c @@ -24,9 +24,6 @@ #include "libavutil/float_dsp.h" #include "libavutil/opt.h" -#define FF_INTERNAL_FIELDS 1 -#include "framequeue.h" - #include "audio.h" #include "avfilter.h" #include "formats.h" @@ -85,8 +82,8 @@ static int activate(AVFilterContext *ctx) FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx); - nb_samples = FFMIN(ff_framequeue_queued_samples(&ctx->inputs[0]->fifo), - ff_framequeue_queued_samples(&ctx->inputs[1]->fifo)); + nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[0]), + ff_inlink_queued_samples(ctx->inputs[1])); for (i = 0; i < ctx->nb_inputs && nb_samples > 0; i++) { if (s->frames[i]) continue; @@ -150,7 +147,7 @@ static int activate(AVFilterContext *ctx) if (ff_outlink_frame_wanted(ctx->outputs[0])) { for (i = 0; i < 2; i++) { - if (ff_framequeue_queued_samples(&ctx->inputs[i]->fifo) > 0) + if (ff_inlink_queued_samples(ctx->inputs[i]) > 0) continue; ff_inlink_request_frame(ctx->inputs[i]); return 0; diff --git a/libavfilter/af_headphone.c 
b/libavfilter/af_headphone.c index 760b97b733c86..0c7e4a295795a 100644 --- a/libavfilter/af_headphone.c +++ b/libavfilter/af_headphone.c @@ -475,7 +475,7 @@ static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink) ret = ff_inlink_consume_samples(ctx->inputs[i + 1], len, len, &s->in[i + 1].frame); if (ret < 0) - return ret; + goto fail; ptr = (float *)s->in[i + 1].frame->extended_data[0]; if (s->hrir_fmt == HRIR_STEREO) { diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index 725bac94a0aca..484b080dea84f 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -156,6 +156,7 @@ extern AVFilter ff_vf_bm3d; extern AVFilter ff_vf_boxblur; extern AVFilter ff_vf_boxblur_opencl; extern AVFilter ff_vf_bwdif; +extern AVFilter ff_vf_chromahold; extern AVFilter ff_vf_chromakey; extern AVFilter ff_vf_ciescope; extern AVFilter ff_vf_codecview; @@ -226,6 +227,7 @@ extern AVFilter ff_vf_fspp; extern AVFilter ff_vf_gblur; extern AVFilter ff_vf_geq; extern AVFilter ff_vf_gradfun; +extern AVFilter ff_vf_graphmonitor; extern AVFilter ff_vf_greyedge; extern AVFilter ff_vf_haldclut; extern AVFilter ff_vf_hflip; @@ -326,6 +328,7 @@ extern AVFilter ff_vf_sendcmd; extern AVFilter ff_vf_separatefields; extern AVFilter ff_vf_setdar; extern AVFilter ff_vf_setfield; +extern AVFilter ff_vf_setparams; extern AVFilter ff_vf_setpts; extern AVFilter ff_vf_setrange; extern AVFilter ff_vf_setsar; @@ -362,6 +365,7 @@ extern AVFilter ff_vf_tlut2; extern AVFilter ff_vf_tmix; extern AVFilter ff_vf_tonemap; extern AVFilter ff_vf_tonemap_opencl; +extern AVFilter ff_vf_tpad; extern AVFilter ff_vf_transpose; extern AVFilter ff_vf_transpose_npp; extern AVFilter ff_vf_trim; @@ -373,6 +377,7 @@ extern AVFilter ff_vf_vaguedenoiser; extern AVFilter ff_vf_vectorscope; extern AVFilter ff_vf_vflip; extern AVFilter ff_vf_vfrdet; +extern AVFilter ff_vf_vibrance; extern AVFilter ff_vf_vidstabdetect; extern AVFilter ff_vf_vidstabtransform; extern AVFilter ff_vf_vignette; @@ 
-383,7 +388,9 @@ extern AVFilter ff_vf_w3fdif; extern AVFilter ff_vf_waveform; extern AVFilter ff_vf_weave; extern AVFilter ff_vf_xbr; +extern AVFilter ff_vf_xstack; extern AVFilter ff_vf_yadif; +extern AVFilter ff_vf_yadif_cuda; extern AVFilter ff_vf_zmq; extern AVFilter ff_vf_zoompan; extern AVFilter ff_vf_zscale; @@ -414,6 +421,7 @@ extern AVFilter ff_vsink_nullsink; /* multimedia filters */ extern AVFilter ff_avf_abitscope; extern AVFilter ff_avf_adrawgraph; +extern AVFilter ff_avf_agraphmonitor; extern AVFilter ff_avf_ahistogram; extern AVFilter ff_avf_aphasemeter; extern AVFilter ff_avf_avectorscope; diff --git a/libavfilter/asrc_hilbert.c b/libavfilter/asrc_hilbert.c index a3a395254fef3..a51c676c6fe14 100644 --- a/libavfilter/asrc_hilbert.c +++ b/libavfilter/asrc_hilbert.c @@ -67,6 +67,7 @@ static const AVOption hilbert_options[] = { { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" }, { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" }, { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" }, + { "bohman" , "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN}, 0, 0, FLAGS, "win_func" }, {NULL} }; diff --git a/libavfilter/avf_showfreqs.c b/libavfilter/avf_showfreqs.c index 22f28ec387157..ff6a762547fe7 100644 --- a/libavfilter/avf_showfreqs.c +++ b/libavfilter/avf_showfreqs.c @@ -118,6 +118,7 @@ static const AVOption showfreqs_options[] = { { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" }, { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" }, { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" }, + { "bohman", "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN} , 0, 0, FLAGS, "win_func" }, { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=1.}, 0., 1., FLAGS }, { "averaging", "set time averaging", 
OFFSET(avg), AV_OPT_TYPE_INT, {.i64=1}, 0, INT32_MAX, FLAGS }, { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS }, diff --git a/libavfilter/avf_showspectrum.c b/libavfilter/avf_showspectrum.c index 41693a0ce1538..e8d3f1ec8d22b 100644 --- a/libavfilter/avf_showspectrum.c +++ b/libavfilter/avf_showspectrum.c @@ -46,7 +46,7 @@ enum DisplayMode { COMBINED, SEPARATE, NB_MODES }; enum DataMode { D_MAGNITUDE, D_PHASE, NB_DMODES }; enum DisplayScale { LINEAR, SQRT, CBRT, LOG, FOURTHRT, FIFTHRT, NB_SCALES }; -enum ColorMode { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, FIERY, FRUIT, COOL, MAGMA, GREEN, NB_CLMODES }; +enum ColorMode { CHANNEL, INTENSITY, RAINBOW, MORELAND, NEBULAE, FIRE, FIERY, FRUIT, COOL, MAGMA, GREEN, VIRIDIS, PLASMA, CIVIDIS, TERRAIN, NB_CLMODES }; enum SlideMode { REPLACE, SCROLL, FULLFRAME, RSCROLL, NB_SLIDES }; enum Orientation { VERTICAL, HORIZONTAL, NB_ORIENTATIONS }; @@ -123,6 +123,10 @@ static const AVOption showspectrum_options[] = { { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" }, { "magma", "magma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MAGMA}, 0, 0, FLAGS, "color" }, { "green", "green based coloring", 0, AV_OPT_TYPE_CONST, {.i64=GREEN}, 0, 0, FLAGS, "color" }, + { "viridis", "viridis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=VIRIDIS}, 0, 0, FLAGS, "color" }, + { "plasma", "plasma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=PLASMA}, 0, 0, FLAGS, "color" }, + { "cividis", "cividis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=CIVIDIS}, 0, 0, FLAGS, "color" }, + { "terrain", "terrain based coloring", 0, AV_OPT_TYPE_CONST, {.i64=TERRAIN}, 0, 0, FLAGS, "color" }, { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=SQRT}, LINEAR, NB_SCALES-1, FLAGS, "scale" }, { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" }, { "sqrt", "square root", 0, 
AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" }, @@ -152,6 +156,7 @@ static const AVOption showspectrum_options[] = { { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" }, { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" }, { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" }, + { "bohman", "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN}, 0, 0, FLAGS, "win_func" }, { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" }, { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" }, { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" }, @@ -247,12 +252,43 @@ static const struct ColorTable { { 0.35, 85/256., (138-128)/256., (179-128)/256. }, { 0.48, 96/256., (128-128)/256., (189-128)/256. }, { 0.64, 128/256., (103-128)/256., (214-128)/256. }, - { 0.78, 167/256., (85-128)/256., (174-128)/256. }, - { 1, 205/256., (80-128)/256., (152-128)/256. }}, + { 0.92, 205/256., (80-128)/256., (152-128)/256. }, + { 1, 1, 0, 0 }}, [GREEN] = { { 0, 0, 0, 0 }, { .75, .5, 0, -.5 }, { 1, 1, 0, 0 }}, + [VIRIDIS] = { + { 0, 0, 0, 0 }, + { 0.10, 0x39/255., (0x9D -128)/255., (0x8F -128)/255. }, + { 0.23, 0x5C/255., (0x9A -128)/255., (0x68 -128)/255. }, + { 0.35, 0x69/255., (0x93 -128)/255., (0x57 -128)/255. }, + { 0.48, 0x76/255., (0x88 -128)/255., (0x4B -128)/255. }, + { 0.64, 0x8A/255., (0x72 -128)/255., (0x4F -128)/255. }, + { 0.80, 0xA3/255., (0x50 -128)/255., (0x66 -128)/255. }, + { 1, 0xCC/255., (0x2F -128)/255., (0x87 -128)/255. }}, + [PLASMA] = { + { 0, 0, 0, 0 }, + { 0.10, 0x27/255., (0xC2 -128)/255., (0x82 -128)/255. }, + { 0.58, 0x5B/255., (0x9A -128)/255., (0xAE -128)/255. }, + { 0.70, 0x89/255., (0x44 -128)/255., (0xAB -128)/255. }, + { 0.80, 0xB4/255., (0x2B -128)/255., (0x9E -128)/255. 
}, + { 0.91, 0xD2/255., (0x38 -128)/255., (0x92 -128)/255. }, + { 1, 1, 0, 0. }}, + [CIVIDIS] = { + { 0, 0, 0, 0 }, + { 0.20, 0x28/255., (0x98 -128)/255., (0x6F -128)/255. }, + { 0.50, 0x48/255., (0x95 -128)/255., (0x74 -128)/255. }, + { 0.63, 0x69/255., (0x84 -128)/255., (0x7F -128)/255. }, + { 0.76, 0x89/255., (0x75 -128)/255., (0x84 -128)/255. }, + { 0.90, 0xCE/255., (0x35 -128)/255., (0x95 -128)/255. }, + { 1, 1, 0, 0. }}, + [TERRAIN] = { + { 0, 0, 0, 0 }, + { 0.15, 0, .5, 0 }, + { 0.60, 1, -.5, -.5 }, + { 0.85, 1, -.5, .5 }, + { 1, 1, 0, 0 }}, }; static av_cold void uninit(AVFilterContext *ctx) @@ -482,6 +518,10 @@ static void color_range(ShowSpectrumContext *s, int ch, case FRUIT: case COOL: case GREEN: + case VIRIDIS: + case PLASMA: + case CIVIDIS: + case TERRAIN: case MAGMA: case INTENSITY: *uf = *yf; @@ -1396,6 +1436,10 @@ static const AVOption showspectrumpic_options[] = { { "cool", "cool based coloring", 0, AV_OPT_TYPE_CONST, {.i64=COOL}, 0, 0, FLAGS, "color" }, { "magma", "magma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=MAGMA}, 0, 0, FLAGS, "color" }, { "green", "green based coloring", 0, AV_OPT_TYPE_CONST, {.i64=GREEN}, 0, 0, FLAGS, "color" }, + { "viridis", "viridis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=VIRIDIS}, 0, 0, FLAGS, "color" }, + { "plasma", "plasma based coloring", 0, AV_OPT_TYPE_CONST, {.i64=PLASMA}, 0, 0, FLAGS, "color" }, + { "cividis", "cividis based coloring", 0, AV_OPT_TYPE_CONST, {.i64=CIVIDIS}, 0, 0, FLAGS, "color" }, + { "terrain", "terrain based coloring", 0, AV_OPT_TYPE_CONST, {.i64=TERRAIN}, 0, 0, FLAGS, "color" }, { "scale", "set display scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64=LOG}, 0, NB_SCALES-1, FLAGS, "scale" }, { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=LINEAR}, 0, 0, FLAGS, "scale" }, { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SQRT}, 0, 0, FLAGS, "scale" }, @@ -1425,6 +1469,7 @@ static const AVOption showspectrumpic_options[] = { { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 
0, 0, FLAGS, "win_func" }, { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" }, { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" }, + { "bohman", "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN}, 0, 0, FLAGS, "win_func" }, { "orientation", "set orientation", OFFSET(orientation), AV_OPT_TYPE_INT, {.i64=VERTICAL}, 0, NB_ORIENTATIONS-1, FLAGS, "orientation" }, { "vertical", NULL, 0, AV_OPT_TYPE_CONST, {.i64=VERTICAL}, 0, 0, FLAGS, "orientation" }, { "horizontal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=HORIZONTAL}, 0, 0, FLAGS, "orientation" }, diff --git a/libavfilter/f_ebur128.c b/libavfilter/f_ebur128.c index e03adc9ba117b..f613d8def23f9 100644 --- a/libavfilter/f_ebur128.c +++ b/libavfilter/f_ebur128.c @@ -777,6 +777,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) int x, y, ret; uint8_t *p; double gauge_value; + int y_loudness_lu_graph, y_loudness_lu_gauge; if (ebur128->gauge_type == GAUGE_TYPE_MOMENTARY) { gauge_value = loudness_400 - ebur128->target; @@ -784,8 +785,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *insamples) gauge_value = loudness_3000 - ebur128->target; } - const int y_loudness_lu_graph = lu_to_y(ebur128, loudness_3000 - ebur128->target); - const int y_loudness_lu_gauge = lu_to_y(ebur128, gauge_value); + y_loudness_lu_graph = lu_to_y(ebur128, loudness_3000 - ebur128->target); + y_loudness_lu_gauge = lu_to_y(ebur128, gauge_value); /* draw the graph using the short-term loudness */ p = pic->data[0] + ebur128->graph.y*pic->linesize[0] + ebur128->graph.x*3; diff --git a/libavfilter/f_graphmonitor.c b/libavfilter/f_graphmonitor.c new file mode 100644 index 0000000000000..7052c84d9b7c8 --- /dev/null +++ b/libavfilter/f_graphmonitor.c @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2018 Paul B Mahol + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "float.h" + +#include "libavutil/pixdesc.h" +#include "libavutil/eval.h" +#include "libavutil/intreadwrite.h" +#include "libavutil/opt.h" +#include "libavutil/timestamp.h" +#include "libavutil/xga_font_data.h" +#include "avfilter.h" +#include "filters.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +typedef struct GraphMonitorContext { + const AVClass *class; + + int w, h; + float opacity; + int mode; + int flags; + AVRational frame_rate; + + int64_t pts; + uint8_t white[4]; + uint8_t yellow[4]; + uint8_t red[4]; + uint8_t green[4]; + uint8_t bg[4]; +} GraphMonitorContext; + +enum { + MODE_QUEUE = 1 << 0, + MODE_FCIN = 1 << 1, + MODE_FCOUT = 1 << 2, + MODE_PTS = 1 << 3, + MODE_TIME = 1 << 4, + MODE_TB = 1 << 5, + MODE_FMT = 1 << 6, + MODE_SIZE = 1 << 7, + MODE_RATE = 1 << 8, +}; + +#define OFFSET(x) offsetof(GraphMonitorContext, x) +#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption graphmonitor_options[] = { + { "size", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF }, + { "s", "set monitor size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF }, + { "opacity", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, 
{.dbl=.9}, 0, 1, VF }, + { "o", "set video opacity", OFFSET(opacity), AV_OPT_TYPE_FLOAT, {.dbl=.9}, 0, 1, VF }, + { "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" }, + { "m", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" }, + { "full", NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" }, + { "compact", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" }, + { "flags", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" }, + { "f", "set flags", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" }, + { "queue", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_QUEUE}, 0, 0, VF, "flags" }, + { "frame_count_in", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCOUT}, 0, 0, VF, "flags" }, + { "frame_count_out", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCIN}, 0, 0, VF, "flags" }, + { "pts", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_PTS}, 0, 0, VF, "flags" }, + { "time", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TIME}, 0, 0, VF, "flags" }, + { "timebase", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TB}, 0, 0, VF, "flags" }, + { "format", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FMT}, 0, 0, VF, "flags" }, + { "size", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_SIZE}, 0, 0, VF, "flags" }, + { "rate", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_RATE}, 0, 0, VF, "flags" }, + { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF }, + { "r", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF }, + { NULL } +}; + +static int query_formats(AVFilterContext *ctx) +{ + AVFilterLink *outlink = ctx->outputs[0]; + static const enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_RGBA, + AV_PIX_FMT_NONE + }; + int ret; + + AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts); + if ((ret = ff_formats_ref(fmts_list, &outlink->in_formats)) < 0) + return ret; + + return 0; +} + +static void clear_image(GraphMonitorContext 
*s, AVFrame *out, AVFilterLink *outlink) +{ + int bg = AV_RN32(s->bg); + + for (int i = 0; i < out->height; i++) + for (int j = 0; j < out->width; j++) + AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg); +} + +static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color) +{ + const uint8_t *font; + int font_height; + int i; + + font = avpriv_cga_font, font_height = 8; + + if (y + 8 >= pic->height || + x + strlen(txt) * 8 >= pic->width) + return; + + for (i = 0; txt[i]; i++) { + int char_y, mask; + + uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4; + for (char_y = 0; char_y < font_height; char_y++) { + for (mask = 0x80; mask; mask >>= 1) { + if (font[txt[i] * font_height + char_y] & mask) { + p[0] = color[0]; + p[1] = color[1]; + p[2] = color[2]; + } + p += 4; + } + p += pic->linesize[0] - 8 * 4; + } + } +} + +static int filter_have_queued(AVFilterContext *filter) +{ + for (int j = 0; j < filter->nb_inputs; j++) { + AVFilterLink *l = filter->inputs[j]; + size_t frames = ff_inlink_queued_frames(l); + + if (frames) + return 1; + } + + for (int j = 0; j < filter->nb_outputs; j++) { + AVFilterLink *l = filter->outputs[j]; + size_t frames = ff_inlink_queued_frames(l); + + if (frames) + return 1; + } + + return 0; +} + +static void draw_items(AVFilterContext *ctx, AVFrame *out, + int xpos, int ypos, + AVFilterLink *l, + size_t frames) +{ + GraphMonitorContext *s = ctx->priv; + char buffer[1024] = { 0 }; + + if (s->flags & MODE_FMT) { + if (l->type == AVMEDIA_TYPE_VIDEO) { + snprintf(buffer, sizeof(buffer)-1, " | format: %s", + av_get_pix_fmt_name(l->format)); + } else if (l->type == AVMEDIA_TYPE_AUDIO) { + snprintf(buffer, sizeof(buffer)-1, " | format: %s", + av_get_sample_fmt_name(l->format)); + } + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + } + if (s->flags & MODE_SIZE) { + if (l->type == AVMEDIA_TYPE_VIDEO) { + snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h); + } else if 
(l->type == AVMEDIA_TYPE_AUDIO) { + snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->channels); + } + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + } + if (s->flags & MODE_RATE) { + if (l->type == AVMEDIA_TYPE_VIDEO) { + snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den); + } else if (l->type == AVMEDIA_TYPE_AUDIO) { + snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate); + } + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + } + if (s->flags & MODE_TB) { + snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den); + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + } + if (s->flags & MODE_QUEUE) { + snprintf(buffer, sizeof(buffer)-1, " | queue: "); + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + snprintf(buffer, sizeof(buffer)-1, "%"PRId64, frames); + drawtext(out, xpos, ypos, buffer, frames > 0 ? frames >= 10 ? frames >= 50 ? 
s->red : s->yellow : s->green : s->white); + xpos += strlen(buffer) * 8; + } + if (s->flags & MODE_FCIN) { + snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in); + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + } + if (s->flags & MODE_FCOUT) { + snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out); + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + } + if (s->flags & MODE_PTS) { + snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(l->current_pts_us)); + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + } + if (s->flags & MODE_TIME) { + snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(l->current_pts_us, &AV_TIME_BASE_Q)); + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + } +} + +static int create_frame(AVFilterContext *ctx, int64_t pts) +{ + GraphMonitorContext *s = ctx->priv; + AVFilterLink *outlink = ctx->outputs[0]; + AVFrame *out; + int xpos, ypos = 0; + + out = ff_get_video_buffer(outlink, outlink->w, outlink->h); + if (!out) + return AVERROR(ENOMEM); + + clear_image(s, out, outlink); + + for (int i = 0; i < ctx->graph->nb_filters; i++) { + AVFilterContext *filter = ctx->graph->filters[i]; + char buffer[1024] = { 0 }; + + if (s->mode && !filter_have_queued(filter)) + continue; + + xpos = 0; + drawtext(out, xpos, ypos, filter->name, s->white); + xpos += strlen(filter->name) * 8 + 10; + drawtext(out, xpos, ypos, filter->filter->name, s->white); + ypos += 10; + for (int j = 0; j < filter->nb_inputs; j++) { + AVFilterLink *l = filter->inputs[j]; + size_t frames = ff_inlink_queued_frames(l); + + if (s->mode && !frames) + continue; + + xpos = 10; + snprintf(buffer, sizeof(buffer)-1, "in%d: ", j); + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + drawtext(out, xpos, ypos, l->src->name, s->white); + xpos += strlen(l->src->name) * 8 + 10; + draw_items(ctx, out, xpos, 
ypos, l, frames); + ypos += 10; + } + + ypos += 2; + for (int j = 0; j < filter->nb_outputs; j++) { + AVFilterLink *l = filter->outputs[j]; + size_t frames = ff_inlink_queued_frames(l); + + if (s->mode && !frames) + continue; + + xpos = 10; + snprintf(buffer, sizeof(buffer)-1, "out%d: ", j); + drawtext(out, xpos, ypos, buffer, s->white); + xpos += strlen(buffer) * 8; + drawtext(out, xpos, ypos, l->dst->name, s->white); + xpos += strlen(l->dst->name) * 8 + 10; + draw_items(ctx, out, xpos, ypos, l, frames); + ypos += 10; + } + ypos += 5; + } + + out->pts = pts; + s->pts = pts; + return ff_filter_frame(outlink, out); +} + +static int activate(AVFilterContext *ctx) +{ + GraphMonitorContext *s = ctx->priv; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + int64_t pts = AV_NOPTS_VALUE; + + FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink); + + if (ff_inlink_queued_frames(inlink)) { + AVFrame *frame = NULL; + int ret; + + ret = ff_inlink_consume_frame(inlink, &frame); + if (ret < 0) + return ret; + if (ret > 0) { + pts = frame->pts; + av_frame_free(&frame); + } + } + + if (pts != AV_NOPTS_VALUE) { + pts = av_rescale_q(pts, inlink->time_base, outlink->time_base); + if (s->pts < pts && ff_outlink_frame_wanted(outlink)) + return create_frame(ctx, pts); + } + + FF_FILTER_FORWARD_STATUS(inlink, outlink); + FF_FILTER_FORWARD_WANTED(outlink, inlink); + + return FFERROR_NOT_READY; +} + +static int config_output(AVFilterLink *outlink) +{ + GraphMonitorContext *s = outlink->src->priv; + + s->bg[3] = 255 * s->opacity; + s->white[0] = s->white[1] = s->white[2] = 255; + s->yellow[0] = s->yellow[1] = 255; + s->red[0] = 255; + s->green[1] = 255; + outlink->w = s->w; + outlink->h = s->h; + outlink->sample_aspect_ratio = (AVRational){1,1}; + outlink->frame_rate = s->frame_rate; + outlink->time_base = av_inv_q(s->frame_rate); + + return 0; +} + +#if CONFIG_GRAPHMONITOR_FILTER + +AVFILTER_DEFINE_CLASS(graphmonitor); + +static const AVFilterPad 
graphmonitor_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +static const AVFilterPad graphmonitor_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + }, + { NULL } +}; + +AVFilter ff_vf_graphmonitor = { + .name = "graphmonitor", + .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."), + .priv_size = sizeof(GraphMonitorContext), + .priv_class = &graphmonitor_class, + .query_formats = query_formats, + .activate = activate, + .inputs = graphmonitor_inputs, + .outputs = graphmonitor_outputs, +}; + +#endif // CONFIG_GRAPHMONITOR_FILTER + +#if CONFIG_AGRAPHMONITOR_FILTER + +#define agraphmonitor_options graphmonitor_options +AVFILTER_DEFINE_CLASS(agraphmonitor); + +static const AVFilterPad agraphmonitor_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_AUDIO, + }, + { NULL } +}; + +static const AVFilterPad agraphmonitor_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_output, + }, + { NULL } +}; + +AVFilter ff_avf_agraphmonitor = { + .name = "agraphmonitor", + .description = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."), + .priv_size = sizeof(GraphMonitorContext), + .priv_class = &agraphmonitor_class, + .query_formats = query_formats, + .activate = activate, + .inputs = agraphmonitor_inputs, + .outputs = agraphmonitor_outputs, +}; +#endif // CONFIG_AGRAPHMONITOR_FILTER diff --git a/libavfilter/f_loop.c b/libavfilter/f_loop.c index 255fe643da853..d9d55f983722f 100644 --- a/libavfilter/f_loop.c +++ b/libavfilter/f_loop.c @@ -25,6 +25,7 @@ #include "libavutil/opt.h" #include "avfilter.h" #include "audio.h" +#include "filters.h" #include "formats.h" #include "internal.h" #include "video.h" @@ -44,6 +45,7 @@ typedef struct LoopContext { int64_t ignored_samples; int loop; + int eof; int64_t size; int64_t start; int64_t pts; @@ -267,7 +269,7 @@ static int push_frame(AVFilterContext *ctx) { AVFilterLink 
*outlink = ctx->outputs[0]; LoopContext *s = ctx->priv; - int64_t pts; + int64_t pts, duration; int ret; AVFrame *out = av_frame_clone(s->frames[s->current_frame]); @@ -275,7 +277,11 @@ static int push_frame(AVFilterContext *ctx) if (!out) return AVERROR(ENOMEM); out->pts += s->duration - s->start_pts; - pts = out->pts + out->pkt_duration; + if (out->pkt_duration) + duration = out->pkt_duration; + else + duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base); + pts = out->pts + duration; ret = ff_filter_frame(outlink, out); s->current_frame++; @@ -295,6 +301,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) AVFilterContext *ctx = inlink->dst; AVFilterLink *outlink = ctx->outputs[0]; LoopContext *s = ctx->priv; + int64_t duration; int ret = 0; if (inlink->frame_count_out >= s->start && s->size > 0 && s->loop != 0) { @@ -307,7 +314,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) return AVERROR(ENOMEM); } s->nb_frames++; - s->duration = frame->pts + frame->pkt_duration; + if (frame->pkt_duration) + duration = frame->pkt_duration; + else + duration = av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base); + s->duration = frame->pts + duration; ret = ff_filter_frame(outlink, frame); } else { av_frame_free(&frame); @@ -321,25 +332,44 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) return ret; } -static int request_frame(AVFilterLink *outlink) +static int activate(AVFilterContext *ctx) { - AVFilterContext *ctx = outlink->src; + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; LoopContext *s = ctx->priv; - int ret = 0; + AVFrame *frame = NULL; + int ret, status; + int64_t pts; - if ((!s->size) || - (s->nb_frames < s->size) || - (s->nb_frames >= s->size && s->loop == 0)) { - ret = ff_request_frame(ctx->inputs[0]); - } else { - ret = push_frame(ctx); + FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink); + + if (!s->eof && (s->nb_frames < s->size || !s->loop)) 
{ + ret = ff_inlink_consume_frame(inlink, &frame); + if (ret < 0) + return ret; + if (ret > 0) + return filter_frame(inlink, frame); } - if (ret == AVERROR_EOF && s->nb_frames > 0 && s->loop != 0) { - ret = push_frame(ctx); + if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) { + if (status == AVERROR_EOF) + s->eof = 1; } - return ret; + if (s->eof && (s->loop == 0 || s->nb_frames < s->size)) { + ff_outlink_set_status(outlink, AVERROR_EOF, s->duration); + return 0; + } + + if (!s->eof && (!s->size || + (s->nb_frames < s->size) || + (s->nb_frames >= s->size && s->loop == 0))) { + FF_FILTER_FORWARD_WANTED(outlink, inlink); + } else if (s->loop && s->nb_frames == s->size) { + return push_frame(ctx); + } + + return FFERROR_NOT_READY; } static const AVOption loop_options[] = { @@ -353,18 +383,16 @@ AVFILTER_DEFINE_CLASS(loop); static const AVFilterPad inputs[] = { { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .filter_frame = filter_frame, + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, }, { NULL } }; static const AVFilterPad outputs[] = { { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .request_frame = request_frame, + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, }, { NULL } }; @@ -376,6 +404,7 @@ AVFilter ff_vf_loop = { .priv_class = &loop_class, .init = init, .uninit = uninit, + .activate = activate, .inputs = inputs, .outputs = outputs, }; diff --git a/libavfilter/split.c b/libavfilter/split.c index 8b260a9ba3371..89af360cb09d7 100644 --- a/libavfilter/split.c +++ b/libavfilter/split.c @@ -30,11 +30,9 @@ #include "libavutil/mem.h" #include "libavutil/opt.h" -#define FF_INTERNAL_FIELDS 1 -#include "framequeue.h" - #include "avfilter.h" #include "audio.h" +#include "filters.h" #include "formats.h" #include "internal.h" #include "video.h" @@ -84,7 +82,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) for (i = 0; i < ctx->nb_outputs; i++) { AVFrame *buf_out; - if (ctx->outputs[i]->status_in) + if 
(ff_outlink_get_status(ctx->outputs[i])) continue; buf_out = av_frame_clone(frame); if (!buf_out) { diff --git a/libavfilter/version.h b/libavfilter/version.h index bb57c5fbedd24..83b18008ce1a6 100644 --- a/libavfilter/version.h +++ b/libavfilter/version.h @@ -30,7 +30,7 @@ #include "libavutil/version.h" #define LIBAVFILTER_VERSION_MAJOR 7 -#define LIBAVFILTER_VERSION_MINOR 34 +#define LIBAVFILTER_VERSION_MINOR 43 #define LIBAVFILTER_VERSION_MICRO 100 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \ diff --git a/libavfilter/vf_chromakey.c b/libavfilter/vf_chromakey.c index 88414783bc800..42b7d71d49406 100644 --- a/libavfilter/vf_chromakey.c +++ b/libavfilter/vf_chromakey.c @@ -38,6 +38,9 @@ typedef struct ChromakeyContext { int hsub_log2; int vsub_log2; + + int (*do_slice)(AVFilterContext *ctx, void *arg, + int jobnr, int nb_jobs); } ChromakeyContext; static uint8_t do_chromakey_pixel(ChromakeyContext *ctx, uint8_t u[9], uint8_t v[9]) @@ -103,12 +106,45 @@ static int do_chromakey_slice(AVFilterContext *avctx, void *arg, int jobnr, int return 0; } +static int do_chromahold_slice(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs) +{ + ChromakeyContext *ctx = avctx->priv; + AVFrame *frame = arg; + const int slice_start = ((frame->height >> ctx->vsub_log2) * jobnr) / nb_jobs; + const int slice_end = ((frame->height >> ctx->vsub_log2) * (jobnr + 1)) / nb_jobs; + + int x, y, alpha; + + for (y = slice_start; y < slice_end; ++y) { + for (x = 0; x < frame->width >> ctx->hsub_log2; ++x) { + int u = frame->data[1][frame->linesize[1] * y + x]; + int v = frame->data[2][frame->linesize[2] * y + x]; + double diff; + int du, dv; + + du = u - ctx->chromakey_uv[0]; + dv = v - ctx->chromakey_uv[1]; + + diff = sqrt((du * du + dv * dv) / (255.0 * 255.0)); + + alpha = diff > ctx->similarity; + if (alpha) { + frame->data[1][frame->linesize[1] * y + x] = 128; + frame->data[2][frame->linesize[2] * y + x] = 128; + } + } + } + + return 0; +} + static int 
filter_frame(AVFilterLink *link, AVFrame *frame) { AVFilterContext *avctx = link->dst; + ChromakeyContext *ctx = avctx->priv; int res; - if (res = avctx->internal->execute(avctx, do_chromakey_slice, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(avctx)))) + if (res = avctx->internal->execute(avctx, ctx->do_slice, frame, NULL, FFMIN(frame->height, ff_filter_get_nb_threads(avctx)))) return res; return ff_filter_frame(avctx->outputs[0], frame); @@ -130,6 +166,12 @@ static av_cold int initialize_chromakey(AVFilterContext *avctx) ctx->chromakey_uv[1] = RGB_TO_V(ctx->chromakey_rgba); } + if (!strcmp(avctx->filter->name, "chromakey")) { + ctx->do_slice = do_chromakey_slice; + } else { + ctx->do_slice = do_chromahold_slice; + } + return 0; } @@ -142,9 +184,19 @@ static av_cold int query_formats(AVFilterContext *avctx) AV_PIX_FMT_NONE }; + static const enum AVPixelFormat hold_pixel_fmts[] = { + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_YUV422P, + AV_PIX_FMT_YUV444P, + AV_PIX_FMT_YUVA420P, + AV_PIX_FMT_YUVA422P, + AV_PIX_FMT_YUVA444P, + AV_PIX_FMT_NONE + }; + AVFilterFormats *formats = NULL; - formats = ff_make_format_list(pixel_fmts); + formats = ff_make_format_list(!strcmp(avctx->filter->name, "chromahold") ? 
hold_pixel_fmts : pixel_fmts); if (!formats) return AVERROR(ENOMEM); @@ -206,3 +258,43 @@ AVFilter ff_vf_chromakey = { .outputs = chromakey_outputs, .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS, }; + +static const AVOption chromahold_options[] = { + { "color", "set the chromahold key color", OFFSET(chromakey_rgba), AV_OPT_TYPE_COLOR, { .str = "black" }, CHAR_MIN, CHAR_MAX, FLAGS }, + { "similarity", "set the chromahold similarity value", OFFSET(similarity), AV_OPT_TYPE_FLOAT, { .dbl = 0.01 }, 0.01, 1.0, FLAGS }, + { "yuv", "color parameter is in yuv instead of rgb", OFFSET(is_yuv), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS }, + { NULL } +}; + +static const AVFilterPad chromahold_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .needs_writable = 1, + .filter_frame = filter_frame, + .config_props = config_input, + }, + { NULL } +}; + +static const AVFilterPad chromahold_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(chromahold); + +AVFilter ff_vf_chromahold = { + .name = "chromahold", + .description = NULL_IF_CONFIG_SMALL("Turns a certain color range into gray."), + .priv_size = sizeof(ChromakeyContext), + .priv_class = &chromahold_class, + .init = initialize_chromakey, + .query_formats = query_formats, + .inputs = chromahold_inputs, + .outputs = chromahold_outputs, + .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS, +}; diff --git a/libavfilter/vf_datascope.c b/libavfilter/vf_datascope.c index 6bcc18e85e045..c9039a60f65d2 100644 --- a/libavfilter/vf_datascope.c +++ b/libavfilter/vf_datascope.c @@ -802,8 +802,8 @@ static void draw_trace8(OscilloscopeContext *s, AVFrame *frame) if ((1 << c) & s->components) { int x = i * s->width / s->nb_values; int px = (i - 1) * s->width / s->nb_values; - int py = s->height - s->values[i-1].p[c] * s->height / 256; - int y = s->height - s->values[i].p[c] * s->height / 256; + int py = 
s->height - s->values[i-1].p[s->rgba_map[c]] * s->height / 256; + int y = s->height - s->values[i].p[s->rgba_map[c]] * s->height / 256; draw_line(&s->draw, s->ox + x, s->oy + y, s->ox + px, s->oy + py, frame, s->colors[c]); } @@ -821,8 +821,8 @@ static void draw_trace16(OscilloscopeContext *s, AVFrame *frame) if ((1 << c) & s->components) { int x = i * s->width / s->nb_values; int px = (i - 1) * s->width / s->nb_values; - int py = s->height - s->values[i-1].p[c] * s->height / s->max; - int y = s->height - s->values[i].p[c] * s->height / s->max; + int py = s->height - s->values[i-1].p[s->rgba_map[c]] * s->height / s->max; + int y = s->height - s->values[i].p[s->rgba_map[c]] * s->height / s->max; draw_line(&s->draw, s->ox + x, s->oy + y, s->ox + px, s->oy + py, frame, s->colors[c]); } @@ -996,9 +996,9 @@ static int oscilloscope_filter_frame(AVFilterLink *inlink, AVFrame *frame) for (i = 0; i < s->nb_values; i++) { for (c = 0; c < s->nb_comps; c++) { if ((1 << c) & s->components) { - max[c] = FFMAX(max[c], s->values[i].p[c]); - min[c] = FFMIN(min[c], s->values[i].p[c]); - average[c] += s->values[i].p[c]; + max[c] = FFMAX(max[c], s->values[i].p[s->rgba_map[c]]); + min[c] = FFMIN(min[c], s->values[i].p[s->rgba_map[c]]); + average[c] += s->values[i].p[s->rgba_map[c]]; } } } @@ -1013,7 +1013,7 @@ static int oscilloscope_filter_frame(AVFilterLink *inlink, AVFrame *frame) const char yuva[4] = { 'Y', 'U', 'V', 'A' }; char text[128]; - snprintf(text, sizeof(text), "%c avg:%.1f min:%d max:%d\n", s->is_rgb ? rgba[c] : yuva[c], average[s->rgba_map[c]], min[s->rgba_map[c]], max[s->rgba_map[c]]); + snprintf(text, sizeof(text), "%c avg:%.1f min:%d max:%d\n", s->is_rgb ? 
rgba[c] : yuva[c], average[c], min[c], max[c]); draw_text(&s->draw, frame, &s->white, s->ox + 2 + 280 * i++, s->oy + s->height + 4, text, 0); } } diff --git a/libavfilter/vf_extractplanes.c b/libavfilter/vf_extractplanes.c index c1c8e694cac6d..f9171572ed261 100644 --- a/libavfilter/vf_extractplanes.c +++ b/libavfilter/vf_extractplanes.c @@ -23,11 +23,9 @@ #include "libavutil/opt.h" #include "libavutil/pixdesc.h" -#define FF_INTERNAL_FIELDS 1 -#include "libavfilter/framequeue.h" - #include "avfilter.h" #include "drawutils.h" +#include "filters.h" #include "internal.h" #define PLANE_R 0x01 @@ -282,7 +280,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) const int idx = s->map[i]; AVFrame *out; - if (outlink->status_in) + if (ff_outlink_get_status(outlink)) continue; out = ff_get_video_buffer(outlink, outlink->w, outlink->h); diff --git a/libavfilter/vf_lut3d.c b/libavfilter/vf_lut3d.c index 4d985c599ffae..f328b30a8f34f 100644 --- a/libavfilter/vf_lut3d.c +++ b/libavfilter/vf_lut3d.c @@ -411,6 +411,8 @@ static int parse_cube(AVFilterContext *ctx, FILE *f) av_log(ctx, AV_LOG_DEBUG, "min: %f %f %f | max: %f %f %f\n", min[0], min[1], min[2], max[0], max[1], max[2]); goto try_again; + } else if (!strncmp(line, "TITLE", 5)) { + goto try_again; } } while (skip_line(line)); if (sscanf(line, "%f %f %f", &vec->r, &vec->g, &vec->b) != 3) @@ -1051,6 +1053,8 @@ static int parse_cube_1d(AVFilterContext *ctx, FILE *f) min[1] = min[2] = min[0]; max[1] = max[2] = max[0]; goto try_again; + } else if (!strncmp(line, "TITLE", 5)) { + goto try_again; } } while (skip_line(line)); if (sscanf(line, "%f %f %f", &lut1d->lut[0][i], &lut1d->lut[1][i], &lut1d->lut[2][i]) != 3) diff --git a/libavfilter/vf_overlay_qsv.c b/libavfilter/vf_overlay_qsv.c index 20871786eea41..9aabb594ba9e2 100644 --- a/libavfilter/vf_overlay_qsv.c +++ b/libavfilter/vf_overlay_qsv.c @@ -160,7 +160,7 @@ static int eval_expr(AVFilterContext *ctx) static int have_alpha_planar(AVFilterLink *link) { - enum 
AVPixelFormat pix_fmt; + enum AVPixelFormat pix_fmt = link->format; const AVPixFmtDescriptor *desc; AVHWFramesContext *fctx; diff --git a/libavfilter/vf_pixdesctest.c b/libavfilter/vf_pixdesctest.c index 2d0749e20bc0b..680d1a772ab37 100644 --- a/libavfilter/vf_pixdesctest.c +++ b/libavfilter/vf_pixdesctest.c @@ -31,7 +31,7 @@ typedef struct PixdescTestContext { const AVPixFmtDescriptor *pix_desc; - uint16_t *line; + uint32_t *line; } PixdescTestContext; static av_cold void uninit(AVFilterContext *ctx) @@ -89,17 +89,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) const int h1 = c == 1 || c == 2 ? ch : h; for (i = 0; i < h1; i++) { - av_read_image_line(priv->line, + av_read_image_line2(priv->line, (void*)in->data, in->linesize, priv->pix_desc, - 0, i, c, w1, 0); + 0, i, c, w1, 0, 4); - av_write_image_line(priv->line, + av_write_image_line2(priv->line, out->data, out->linesize, priv->pix_desc, - 0, i, c, w1); + 0, i, c, w1, 4); } } diff --git a/libavfilter/vf_scale_cuda.c b/libavfilter/vf_scale_cuda.c index 23ac27a7dc4c0..7b2b78c1ed81b 100644 --- a/libavfilter/vf_scale_cuda.c +++ b/libavfilter/vf_scale_cuda.c @@ -27,7 +27,7 @@ #include "libavutil/avstring.h" #include "libavutil/common.h" #include "libavutil/hwcontext.h" -#include "libavutil/hwcontext_cuda_internal.h" +#include "libavutil/hwcontext_cuda.h" #include "libavutil/internal.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" diff --git a/libavfilter/vf_setfield.c b/libavfilter/vf_setfield.c deleted file mode 100644 index f4dc33d7e596e..0000000000000 --- a/libavfilter/vf_setfield.c +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Copyright (c) 2012 Stefano Sabatini - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * set field order - */ - -#include "libavutil/opt.h" -#include "avfilter.h" -#include "internal.h" -#include "video.h" - -enum SetFieldMode { - MODE_AUTO = -1, - MODE_BFF, - MODE_TFF, - MODE_PROG, -}; - -typedef struct SetFieldContext { - const AVClass *class; - int mode; ///< SetFieldMode -} SetFieldContext; - -#define OFFSET(x) offsetof(SetFieldContext, x) -#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM - -static const AVOption setfield_options[] = { - {"mode", "select interlace mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_AUTO}, -1, MODE_PROG, FLAGS, "mode"}, - {"auto", "keep the same input field", 0, AV_OPT_TYPE_CONST, {.i64=MODE_AUTO}, INT_MIN, INT_MAX, FLAGS, "mode"}, - {"bff", "mark as bottom-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BFF}, INT_MIN, INT_MAX, FLAGS, "mode"}, - {"tff", "mark as top-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_TFF}, INT_MIN, INT_MAX, FLAGS, "mode"}, - {"prog", "mark as progressive", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PROG}, INT_MIN, INT_MAX, FLAGS, "mode"}, - {NULL} -}; - -AVFILTER_DEFINE_CLASS(setfield); - -static int filter_frame(AVFilterLink *inlink, AVFrame *picref) -{ - SetFieldContext *setfield = inlink->dst->priv; - - if (setfield->mode == MODE_PROG) { - picref->interlaced_frame = 0; - } else if (setfield->mode != MODE_AUTO) { - picref->interlaced_frame = 1; - picref->top_field_first = setfield->mode; - } - return ff_filter_frame(inlink->dst->outputs[0], picref); -} - -static const AVFilterPad 
setfield_inputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - .filter_frame = filter_frame, - }, - { NULL } -}; - -static const AVFilterPad setfield_outputs[] = { - { - .name = "default", - .type = AVMEDIA_TYPE_VIDEO, - }, - { NULL } -}; - -AVFilter ff_vf_setfield = { - .name = "setfield", - .description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."), - .priv_size = sizeof(SetFieldContext), - .priv_class = &setfield_class, - .inputs = setfield_inputs, - .outputs = setfield_outputs, -}; diff --git a/libavfilter/vf_setparams.c b/libavfilter/vf_setparams.c index 8427f98ba824a..fe298e5a06eb2 100644 --- a/libavfilter/vf_setparams.c +++ b/libavfilter/vf_setparams.c @@ -1,4 +1,6 @@ /* + * Copyright (c) 2012 Stefano Sabatini + * * This file is part of FFmpeg. * * FFmpeg is free software; you can redistribute it and/or @@ -22,15 +24,32 @@ #include "internal.h" #include "video.h" +enum SetFieldMode { + MODE_AUTO = -1, + MODE_BFF, + MODE_TFF, + MODE_PROG, +}; + typedef struct SetParamsContext { const AVClass *class; + int field_mode; int color_range; + int color_primaries; + int color_trc; + int colorspace; } SetParamsContext; #define OFFSET(x) offsetof(SetParamsContext, x) #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM -static const AVOption setrange_options[] = { +static const AVOption setparams_options[] = { + {"field_mode", "select interlace mode", OFFSET(field_mode), AV_OPT_TYPE_INT, {.i64=MODE_AUTO}, -1, MODE_PROG, FLAGS, "mode"}, + {"auto", "keep the same input field", 0, AV_OPT_TYPE_CONST, {.i64=MODE_AUTO}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"bff", "mark as bottom-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BFF}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"tff", "mark as top-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_TFF}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"prog", "mark as progressive", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PROG}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"range", "select color range", 
OFFSET(color_range), AV_OPT_TYPE_INT, {.i64=-1},-1, AVCOL_RANGE_NB-1, FLAGS, "range"}, {"auto", "keep the same color range", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "range"}, {"unspecified", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_UNSPECIFIED}, 0, 0, FLAGS, "range"}, @@ -41,18 +60,87 @@ static const AVOption setrange_options[] = { {"full", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range"}, {"pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range"}, {"jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range"}, + + {"color_primaries", "select color primaries", OFFSET(color_primaries), AV_OPT_TYPE_INT, {.i64=-1}, -1, AVCOL_PRI_NB-1, FLAGS, "color_primaries"}, + {"auto", "keep the same color primaries", 0, AV_OPT_TYPE_CONST, {.i64=-1}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"bt709", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_BT709}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_UNSPECIFIED}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"bt470m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_BT470M}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"bt470bg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_BT470BG}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"smpte170m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE170M}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"smpte240m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE240M}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"film", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_FILM}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"bt2020", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_BT2020}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"smpte428", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE428}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"smpte431", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE431}, INT_MIN, INT_MAX, FLAGS, 
"color_primaries"}, + {"smpte432", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_SMPTE432}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + {"jedec-p22", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_PRI_JEDEC_P22}, INT_MIN, INT_MAX, FLAGS, "color_primaries"}, + + {"color_trc", "select color transfer", OFFSET(color_trc), AV_OPT_TYPE_INT, {.i64=-1}, -1, AVCOL_TRC_NB-1, FLAGS, "color_trc"}, + {"auto", "keep the same color transfer", 0, AV_OPT_TYPE_CONST, {.i64=-1}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"bt709", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT709}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_UNSPECIFIED}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"bt470m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_GAMMA22}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"bt470bg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_GAMMA28}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"smpte170m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE170M}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"smpte240m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE240M}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_LINEAR}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"log100", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_LOG}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"log316", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_LOG_SQRT}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"iec61966-2-4", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_IEC61966_2_4}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"bt1361e", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT1361_ECG}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"iec61966-2-1", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_IEC61966_2_1}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"bt2020-10", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT2020_10}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"bt2020-12", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_BT2020_12}, INT_MIN, 
INT_MAX, FLAGS, "color_trc"}, + {"smpte2084", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE2084}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"smpte428", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_SMPTE428}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + {"arib-std-b67", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_TRC_ARIB_STD_B67}, INT_MIN, INT_MAX, FLAGS, "color_trc"}, + + {"colorspace", "select colorspace", OFFSET(colorspace), AV_OPT_TYPE_INT, {.i64=-1}, -1, AVCOL_SPC_NB-1, FLAGS, "colorspace"}, + {"auto", "keep the same colorspace", 0, AV_OPT_TYPE_CONST, {.i64=-1}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"gbr", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_RGB}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"bt709", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_BT709}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_UNSPECIFIED}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"fcc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_FCC}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"bt470bg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_BT470BG}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"smpte170m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_SMPTE170M}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"smpte240m", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_SMPTE240M}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"ycgco", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_YCGCO}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"bt2020nc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_BT2020_NCL}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"bt2020c", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_BT2020_CL}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"smpte2085", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_SMPTE2085}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"chroma-derived-nc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_CHROMA_DERIVED_NCL},INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"chroma-derived-c", NULL, 0, AV_OPT_TYPE_CONST, 
{.i64=AVCOL_SPC_CHROMA_DERIVED_CL}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, + {"ictcp", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_SPC_ICTCP}, INT_MIN, INT_MAX, FLAGS, "colorspace"}, {NULL} }; -AVFILTER_DEFINE_CLASS(setrange); +AVFILTER_DEFINE_CLASS(setparams); static int filter_frame(AVFilterLink *inlink, AVFrame *frame) { AVFilterContext *ctx = inlink->dst; SetParamsContext *s = ctx->priv; + /* set field */ + if (s->field_mode == MODE_PROG) { + frame->interlaced_frame = 0; + } else if (s->field_mode != MODE_AUTO) { + frame->interlaced_frame = 1; + frame->top_field_first = s->field_mode; + } + + /* set range */ if (s->color_range >= 0) frame->color_range = s->color_range; + + /* set color prim, trc, space */ + if (s->color_primaries >= 0) + frame->color_primaries = s->color_primaries; + if (s->color_trc >= 0) + frame->color_trc = s->color_trc; + if (s->colorspace >= 0) + frame->colorspace = s->colorspace; return ff_filter_frame(ctx->outputs[0], frame); } @@ -73,11 +161,85 @@ static const AVFilterPad outputs[] = { { NULL } }; +AVFilter ff_vf_setparams = { + .name = "setparams", + .description = NULL_IF_CONFIG_SMALL("Force field, or color property for the output video frame."), + .priv_size = sizeof(SetParamsContext), + .priv_class = &setparams_class, + .inputs = inputs, + .outputs = outputs, +}; + +#if CONFIG_SETRANGE_FILTER + +static const AVOption setrange_options[] = { + {"range", "select color range", OFFSET(color_range), AV_OPT_TYPE_INT, {.i64=-1},-1, AVCOL_RANGE_NB-1, FLAGS, "range"}, + {"auto", "keep the same color range", 0, AV_OPT_TYPE_CONST, {.i64=-1}, 0, 0, FLAGS, "range"}, + {"unspecified", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_UNSPECIFIED}, 0, 0, FLAGS, "range"}, + {"unknown", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_UNSPECIFIED}, 0, 0, FLAGS, "range"}, + {"limited", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range"}, + {"tv", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range"}, + {"mpeg", NULL, 
0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_MPEG}, 0, 0, FLAGS, "range"}, + {"full", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range"}, + {"pc", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range"}, + {"jpeg", NULL, 0, AV_OPT_TYPE_CONST, {.i64=AVCOL_RANGE_JPEG}, 0, 0, FLAGS, "range"}, + {NULL} +}; + +AVFILTER_DEFINE_CLASS(setrange); + +static av_cold int init_setrange(AVFilterContext *ctx) +{ + SetParamsContext *s = ctx->priv; + + s->field_mode = MODE_AUTO;/* set field mode to auto */ + s->color_primaries = -1; + s->color_trc = -1; + s->colorspace = -1; + return 0; +} + AVFilter ff_vf_setrange = { .name = "setrange", .description = NULL_IF_CONFIG_SMALL("Force color range for the output video frame."), .priv_size = sizeof(SetParamsContext), + .init = init_setrange, .priv_class = &setrange_class, .inputs = inputs, .outputs = outputs, }; +#endif /* CONFIG_SETRANGE_FILTER */ + +#if CONFIG_SETFIELD_FILTER +static const AVOption setfield_options[] = { + {"mode", "select interlace mode", OFFSET(field_mode), AV_OPT_TYPE_INT, {.i64=MODE_AUTO}, -1, MODE_PROG, FLAGS, "mode"}, + {"auto", "keep the same input field", 0, AV_OPT_TYPE_CONST, {.i64=MODE_AUTO}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"bff", "mark as bottom-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_BFF}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"tff", "mark as top-field-first", 0, AV_OPT_TYPE_CONST, {.i64=MODE_TFF}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {"prog", "mark as progressive", 0, AV_OPT_TYPE_CONST, {.i64=MODE_PROG}, INT_MIN, INT_MAX, FLAGS, "mode"}, + {NULL} +}; + +AVFILTER_DEFINE_CLASS(setfield); + +static av_cold int init_setfield(AVFilterContext *ctx) +{ + SetParamsContext *s = ctx->priv; + + s->color_range = -1;/* set range mode to auto */ + s->color_primaries = -1; + s->color_trc = -1; + s->colorspace = -1; + return 0; +} + +AVFilter ff_vf_setfield = { + .name = "setfield", + .description = NULL_IF_CONFIG_SMALL("Force field for the output video frame."), + 
.priv_size = sizeof(SetParamsContext), + .init = init_setfield, + .priv_class = &setfield_class, + .inputs = inputs, + .outputs = outputs, +}; +#endif /* CONFIG_SETFIELD_FILTER */ diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c index d1d1415c0b791..37e73b60aaabd 100644 --- a/libavfilter/vf_showinfo.c +++ b/libavfilter/vf_showinfo.c @@ -32,6 +32,7 @@ #include "libavutil/spherical.h" #include "libavutil/stereo3d.h" #include "libavutil/timestamp.h" +#include "libavutil/timecode.h" #include "avfilter.h" #include "internal.h" @@ -94,6 +95,39 @@ static void dump_stereo3d(AVFilterContext *ctx, AVFrameSideData *sd) av_log(ctx, AV_LOG_INFO, " (inverted)"); } +static void dump_color_property(AVFilterContext *ctx, AVFrame *frame) +{ + const char *color_range_str = av_color_range_name(frame->color_range); + const char *colorspace_str = av_color_space_name(frame->colorspace); + const char *color_primaries_str = av_color_primaries_name(frame->color_primaries); + const char *color_trc_str = av_color_transfer_name(frame->color_trc); + + if (!color_range_str || frame->color_range == AVCOL_RANGE_UNSPECIFIED) { + av_log(ctx, AV_LOG_INFO, "color_range:unknown"); + } else { + av_log(ctx, AV_LOG_INFO, "color_range:%s", color_range_str); + } + + if (!colorspace_str || frame->colorspace == AVCOL_SPC_UNSPECIFIED) { + av_log(ctx, AV_LOG_INFO, " color_space:unknown"); + } else { + av_log(ctx, AV_LOG_INFO, " color_space:%s", colorspace_str); + } + + if (!color_primaries_str || frame->color_primaries == AVCOL_PRI_UNSPECIFIED) { + av_log(ctx, AV_LOG_INFO, " color_primaries:unknown"); + } else { + av_log(ctx, AV_LOG_INFO, " color_primaries:%s", color_primaries_str); + } + + if (!color_trc_str || frame->color_trc == AVCOL_TRC_UNSPECIFIED) { + av_log(ctx, AV_LOG_INFO, " color_trc:unknown"); + } else { + av_log(ctx, AV_LOG_INFO, " color_trc:%s", color_trc_str); + } + av_log(ctx, AV_LOG_INFO, "\n"); +} + static void update_sample_stats(const uint8_t *src, int len, int64_t *sum, 
int64_t *sum2) { int i; @@ -174,6 +208,15 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) case AV_FRAME_DATA_STEREO3D: dump_stereo3d(ctx, sd); break; + case AV_FRAME_DATA_S12M_TIMECODE: { + uint32_t *tc = (uint32_t*)sd->data; + for (int j = 1; j < tc[0]; j++) { + char tcbuf[AV_TIMECODE_STR_SIZE]; + av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0); + av_log(ctx, AV_LOG_INFO, "timecode - %s%s", tcbuf, j != tc[0] - 1 ? ", " : ""); + } + break; + } case AV_FRAME_DATA_DISPLAYMATRIX: av_log(ctx, AV_LOG_INFO, "displaymatrix: rotation of %.2f degrees", av_display_rotation_get((int32_t *)sd->data)); @@ -190,6 +233,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) av_log(ctx, AV_LOG_INFO, "\n"); } + dump_color_property(ctx, frame); + return ff_filter_frame(inlink->dst->outputs[0], frame); } diff --git a/libavfilter/vf_stack.c b/libavfilter/vf_stack.c index b2b8c68041d21..8731674aa71b4 100644 --- a/libavfilter/vf_stack.c +++ b/libavfilter/vf_stack.c @@ -29,14 +29,23 @@ #include "framesync.h" #include "video.h" +typedef struct StackItem { + int x[4], y[4]; + int linesize[4]; + int height[4]; +} StackItem; + typedef struct StackContext { const AVClass *class; const AVPixFmtDescriptor *desc; int nb_inputs; + char *layout; int shortest; int is_vertical; + int is_horizontal; int nb_planes; + StackItem *items; AVFrame **frames; FFFrameSync fs; } StackContext; @@ -66,10 +75,19 @@ static av_cold int init(AVFilterContext *ctx) if (!strcmp(ctx->filter->name, "vstack")) s->is_vertical = 1; + if (!strcmp(ctx->filter->name, "hstack")) + s->is_horizontal = 1; + s->frames = av_calloc(s->nb_inputs, sizeof(*s->frames)); if (!s->frames) return AVERROR(ENOMEM); + if (!strcmp(ctx->filter->name, "xstack")) { + s->items = av_calloc(s->nb_inputs, sizeof(*s->items)); + if (!s->items) + return AVERROR(ENOMEM); + } + for (i = 0; i < s->nb_inputs; i++) { AVFilterPad pad = { 0 }; @@ -112,13 +130,15 @@ static int process_frame(FFFrameSync *fs) int linesize[4]; int 
height[4]; - if ((ret = av_image_fill_linesizes(linesize, inlink->format, inlink->w)) < 0) { - av_frame_free(&out); - return ret; - } + if (s->is_horizontal || s->is_vertical) { + if ((ret = av_image_fill_linesizes(linesize, inlink->format, inlink->w)) < 0) { + av_frame_free(&out); + return ret; + } - height[1] = height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h); - height[0] = height[3] = inlink->h; + height[1] = height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h); + height[0] = height[3] = inlink->h; + } for (p = 0; p < s->nb_planes; p++) { if (s->is_vertical) { @@ -128,13 +148,21 @@ static int process_frame(FFFrameSync *fs) in[i]->linesize[p], linesize[p], height[p]); offset[p] += height[p]; - } else { + } else if (s->is_horizontal) { av_image_copy_plane(out->data[p] + offset[p], out->linesize[p], in[i]->data[p], in[i]->linesize[p], linesize[p], height[p]); offset[p] += linesize[p]; + } else { + StackItem *item = &s->items[i]; + + av_image_copy_plane(out->data[p] + out->linesize[p] * item->y[p] + item->x[p], + out->linesize[p], + in[i]->data[p], + in[i]->linesize[p], + item->linesize[p], item->height[p]); } } } @@ -154,6 +182,10 @@ static int config_output(AVFilterLink *outlink) FFFrameSyncIn *in; int i, ret; + s->desc = av_pix_fmt_desc_get(outlink->format); + if (!s->desc) + return AVERROR_BUG; + if (s->is_vertical) { for (i = 1; i < s->nb_inputs; i++) { if (ctx->inputs[i]->w != width) { @@ -162,7 +194,7 @@ static int config_output(AVFilterLink *outlink) } height += ctx->inputs[i]->h; } - } else { + } else if (s->is_horizontal) { for (i = 1; i < s->nb_inputs; i++) { if (ctx->inputs[i]->h != height) { av_log(ctx, AV_LOG_ERROR, "Input %d height %d does not match input %d height %d.\n", i, ctx->inputs[i]->h, 0, height); @@ -170,11 +202,81 @@ static int config_output(AVFilterLink *outlink) } width += ctx->inputs[i]->w; } + } else { + char *arg, *p = s->layout, *saveptr = NULL; + char *arg2, *p2, *saveptr2 = NULL; + char *arg3, *p3, *saveptr3 = 
NULL; + int inw, inh, size; + + for (i = 0; i < s->nb_inputs; i++) { + AVFilterLink *inlink = ctx->inputs[i]; + StackItem *item = &s->items[i]; + + if (!(arg = av_strtok(p, "|", &saveptr))) + return AVERROR(EINVAL); + + p = NULL; + + if ((ret = av_image_fill_linesizes(item->linesize, inlink->format, inlink->w)) < 0) { + return ret; + } + + item->height[1] = item->height[2] = AV_CEIL_RSHIFT(inlink->h, s->desc->log2_chroma_h); + item->height[0] = item->height[3] = inlink->h; + + p2 = arg; + inw = inh = 0; + + for (int j = 0; j < 2; j++) { + if (!(arg2 = av_strtok(p2, "_", &saveptr2))) + return AVERROR(EINVAL); + + p2 = NULL; + p3 = arg2; + while ((arg3 = av_strtok(p3, "+", &saveptr3))) { + p3 = NULL; + if (sscanf(arg3, "w%d", &size) == 1) { + if (size == i || size < 0 || size >= s->nb_inputs) + return AVERROR(EINVAL); + + if (!j) + inw += ctx->inputs[size]->w; + else + inh += ctx->inputs[size]->w; + } else if (sscanf(arg3, "h%d", &size) == 1) { + if (size == i || size < 0 || size >= s->nb_inputs) + return AVERROR(EINVAL); + + if (!j) + inw += ctx->inputs[size]->h; + else + inh += ctx->inputs[size]->h; + } else if (sscanf(arg3, "%d", &size) == 1) { + if (size < 0) + return AVERROR(EINVAL); + + if (!j) + inw += size; + else + inh += size; + } else { + return AVERROR(EINVAL); + } + } + } + + if ((ret = av_image_fill_linesizes(item->x, inlink->format, inw)) < 0) { + return ret; + } + + item->y[1] = item->y[2] = AV_CEIL_RSHIFT(inh, s->desc->log2_chroma_h); + item->y[0] = item->y[3] = inh; + + width = FFMAX(width, inlink->w + inw); + height = FFMAX(height, inlink->h + inh); + } } - s->desc = av_pix_fmt_desc_get(outlink->format); - if (!s->desc) - return AVERROR_BUG; s->nb_planes = av_pix_fmt_count_planes(outlink->format); outlink->w = width; @@ -209,6 +311,7 @@ static av_cold void uninit(AVFilterContext *ctx) ff_framesync_uninit(&s->fs); av_freep(&s->frames); + av_freep(&s->items); for (i = 0; i < ctx->nb_inputs; i++) av_freep(&ctx->input_pads[i].name); @@ -276,3 +379,29 
@@ AVFilter ff_vf_vstack = { }; #endif /* CONFIG_VSTACK_FILTER */ + +#if CONFIG_XSTACK_FILTER + +static const AVOption xstack_options[] = { + { "inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64=2}, 2, INT_MAX, .flags = FLAGS }, + { "layout", "set custom layout", OFFSET(layout), AV_OPT_TYPE_STRING, {.str="0_0|w0_0"}, 0, 0, .flags = FLAGS }, + { "shortest", "force termination when the shortest input terminates", OFFSET(shortest), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, .flags = FLAGS }, + { NULL }, +}; + +AVFILTER_DEFINE_CLASS(xstack); + +AVFilter ff_vf_xstack = { + .name = "xstack", + .description = NULL_IF_CONFIG_SMALL("Stack video inputs into custom layout."), + .priv_size = sizeof(StackContext), + .priv_class = &xstack_class, + .query_formats = query_formats, + .outputs = outputs, + .init = init, + .uninit = uninit, + .activate = activate, + .flags = AVFILTER_FLAG_DYNAMIC_INPUTS, +}; + +#endif /* CONFIG_XSTACK_FILTER */ diff --git a/libavfilter/vf_thumbnail_cuda.c b/libavfilter/vf_thumbnail_cuda.c index 09377ca7f400c..53df7e0bf734a 100644 --- a/libavfilter/vf_thumbnail_cuda.c +++ b/libavfilter/vf_thumbnail_cuda.c @@ -23,7 +23,7 @@ #include #include "libavutil/hwcontext.h" -#include "libavutil/hwcontext_cuda_internal.h" +#include "libavutil/hwcontext_cuda.h" #include "libavutil/opt.h" #include "libavutil/pixdesc.h" diff --git a/libavfilter/vf_tpad.c b/libavfilter/vf_tpad.c new file mode 100644 index 0000000000000..86e063090bf71 --- /dev/null +++ b/libavfilter/vf_tpad.c @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2018 Paul B Mahol + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/avassert.h" +#include "libavutil/channel_layout.h" +#include "libavutil/opt.h" +#include "avfilter.h" +#include "audio.h" +#include "filters.h" +#include "internal.h" +#include "formats.h" +#include "drawutils.h" + +typedef struct TPadContext { + const AVClass *class; + int pad_start; + int pad_stop; + int start_mode; + int stop_mode; + int64_t start_duration; + int64_t stop_duration; + uint8_t rgba_color[4]; ///< color for the padding area + + FFDrawContext draw; + FFDrawColor color; + int64_t pts; + int eof; + AVFrame *cache_start; + AVFrame *cache_stop; +} TPadContext; + +#define OFFSET(x) offsetof(TPadContext, x) +#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +static const AVOption tpad_options[] = { + { "start", "set the number of frames to delay input", OFFSET(pad_start), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, VF }, + { "stop", "set the number of frames to add after input finished", OFFSET(pad_stop), AV_OPT_TYPE_INT, {.i64=0}, -1, INT_MAX, VF }, + { "start_mode", "set the mode of added frames to start", OFFSET(start_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" }, + { "add", "add solid-color frames", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" }, + { "clone", "clone first/last frame", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" }, + { "stop_mode", "set the mode of added frames to end", OFFSET(stop_mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, VF, "mode" }, + { "start_duration", "set the duration to delay input", OFFSET(start_duration), 
AV_OPT_TYPE_DURATION, {.i64=0}, 0, INT64_MAX, VF }, + { "stop_duration", "set the duration to pad input", OFFSET(stop_duration), AV_OPT_TYPE_DURATION, {.i64=0}, 0, INT64_MAX, VF }, + { "color", "set the color of the added frames", OFFSET(rgba_color), AV_OPT_TYPE_COLOR, {.str="black"}, 0, 0, VF }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(tpad); + +static int query_formats(AVFilterContext *ctx) +{ + return ff_set_common_formats(ctx, ff_draw_supported_pixel_formats(0)); +} + +static int activate(AVFilterContext *ctx) +{ + AVFilterLink *inlink = ctx->inputs[0]; + AVFilterLink *outlink = ctx->outputs[0]; + TPadContext *s = ctx->priv; + AVFrame *frame = NULL; + int ret, status; + int64_t pts; + + FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink); + + if (s->start_mode == 0 && s->pad_start > 0 && ff_outlink_frame_wanted(outlink)) { + frame = ff_get_video_buffer(outlink, outlink->w, outlink->h); + if (!frame) + return AVERROR(ENOMEM); + ff_fill_rectangle(&s->draw, &s->color, + frame->data, frame->linesize, + 0, 0, frame->width, frame->height); + frame->pts = s->pts; + s->pts += av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base); + s->pad_start--; + return ff_filter_frame(outlink, frame); + } + + if (s->start_mode == 1 && s->pad_start > 0) { + if (!s->cache_start && ff_inlink_queued_frames(inlink)) { + s->cache_start = ff_inlink_peek_frame(inlink, 0); + } else if (!s->cache_start) { + FF_FILTER_FORWARD_WANTED(outlink, inlink); + } + frame = av_frame_clone(s->cache_start); + if (!frame) + return AVERROR(ENOMEM); + frame->pts = s->pts; + s->pts += av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base); + s->pad_start--; + if (s->pad_start == 0) + s->cache_start = NULL; + return ff_filter_frame(outlink, frame); + } + + if (!s->eof && !s->pad_start) { + ret = ff_inlink_consume_frame(inlink, &frame); + if (ret < 0) + return ret; + if (ret > 0) { + if (s->stop_mode == 1 && s->pad_stop != 0) { + av_frame_free(&s->cache_stop); + s->cache_stop = 
av_frame_clone(frame); + } + frame->pts += s->pts; + return ff_filter_frame(outlink, frame); + } + } + + if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) { + if (status == AVERROR_EOF) { + if (!s->pad_stop) { + ff_outlink_set_status(outlink, status, pts); + return 0; + } + s->eof = 1; + s->pts += pts; + } + } + + if (s->eof) { + if (!s->pad_stop) { + ff_outlink_set_status(outlink, AVERROR_EOF, s->pts); + return 0; + } + if (s->stop_mode == 0) { + frame = ff_get_video_buffer(outlink, outlink->w, outlink->h); + if (!frame) + return AVERROR(ENOMEM); + ff_fill_rectangle(&s->draw, &s->color, + frame->data, frame->linesize, + 0, 0, frame->width, frame->height); + } else if (s->stop_mode == 1) { + frame = av_frame_clone(s->cache_stop); + if (!frame) + return AVERROR(ENOMEM); + } + frame->pts = s->pts; + s->pts += av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base); + if (s->pad_stop > 0) + s->pad_stop--; + return ff_filter_frame(outlink, frame); + } + + if (!s->pad_start) + FF_FILTER_FORWARD_WANTED(outlink, inlink); + + return FFERROR_NOT_READY; +} + +static int config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + TPadContext *s = ctx->priv; + + ff_draw_init(&s->draw, inlink->format, 0); + ff_draw_color(&s->draw, &s->color, s->rgba_color); + + if (s->start_duration) + s->pad_start = av_rescale_q(s->start_duration, inlink->frame_rate, av_inv_q(AV_TIME_BASE_Q)); + if (s->stop_duration) + s->pad_stop = av_rescale_q(s->stop_duration, inlink->frame_rate, av_inv_q(AV_TIME_BASE_Q)); + + return 0; +} + +static void uninit(AVFilterContext *ctx) +{ + TPadContext *s = ctx->priv; + + av_frame_free(&s->cache_stop); +} + +static const AVFilterPad tpad_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .config_props = config_input, + }, + { NULL } +}; + +static const AVFilterPad tpad_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +AVFilter ff_vf_tpad = { + .name = 
"tpad", + .description = NULL_IF_CONFIG_SMALL("Temporarily pad video frames."), + .priv_size = sizeof(TPadContext), + .priv_class = &tpad_class, + .query_formats = query_formats, + .activate = activate, + .uninit = uninit, + .inputs = tpad_inputs, + .outputs = tpad_outputs, +}; diff --git a/libavfilter/vf_vibrance.c b/libavfilter/vf_vibrance.c new file mode 100644 index 0000000000000..1e5bf26512f37 --- /dev/null +++ b/libavfilter/vf_vibrance.c @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2018 Paul B Mahol + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/opt.h" +#include "libavutil/imgutils.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +typedef struct VibranceContext { + const AVClass *class; + + float intensity; + float balance[3]; + float lcoeffs[3]; + + int depth; + + int (*do_slice)(AVFilterContext *s, void *arg, + int jobnr, int nb_jobs); +} VibranceContext; + +static inline float lerpf(float v0, float v1, float f) +{ + return v0 + (v1 - v0) * f; +} + +static int vibrance_slice8(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs) +{ + VibranceContext *s = avctx->priv; + AVFrame *frame = arg; + const int width = frame->width; + const int height = frame->height; + const float gc = s->lcoeffs[0]; + const float bc = s->lcoeffs[1]; + const float rc = s->lcoeffs[2]; + const float intensity = s->intensity; + const float gintensity = intensity * s->balance[0]; + const float bintensity = intensity * s->balance[1]; + const float rintensity = intensity * s->balance[2]; + const int slice_start = (height * jobnr) / nb_jobs; + const int slice_end = (height * (jobnr + 1)) / nb_jobs; + const int glinesize = frame->linesize[0]; + const int blinesize = frame->linesize[1]; + const int rlinesize = frame->linesize[2]; + uint8_t *gptr = frame->data[0] + slice_start * glinesize; + uint8_t *bptr = frame->data[1] + slice_start * blinesize; + uint8_t *rptr = frame->data[2] + slice_start * rlinesize; + + for (int y = slice_start; y < slice_end; y++) { + for (int x = 0; x < width; x++) { + float g = gptr[x] / 255.f; + float b = bptr[x] / 255.f; + float r = rptr[x] / 255.f; + float max_color = FFMAX3(r, g, b); + float min_color = FFMIN3(r, g, b); + float color_saturation = max_color - min_color; + float luma = g * gc + r * rc + b * bc; + 
const float cg = 1.f + gintensity * (1.f - FFSIGN(gintensity) * color_saturation); + const float cb = 1.f + bintensity * (1.f - FFSIGN(bintensity) * color_saturation); + const float cr = 1.f + rintensity * (1.f - FFSIGN(rintensity) * color_saturation); + + g = lerpf(luma, g, cg); + b = lerpf(luma, b, cb); + r = lerpf(luma, r, cr); + + gptr[x] = av_clip_uint8(g * 255.f); + bptr[x] = av_clip_uint8(b * 255.f); + rptr[x] = av_clip_uint8(r * 255.f); + } + + gptr += glinesize; + bptr += blinesize; + rptr += rlinesize; + } + + return 0; +} + +static int vibrance_slice16(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs) +{ + VibranceContext *s = avctx->priv; + AVFrame *frame = arg; + const int depth = s->depth; + const float max = (1 << depth) - 1; + const float gc = s->lcoeffs[0]; + const float bc = s->lcoeffs[1]; + const float rc = s->lcoeffs[2]; + const int width = frame->width; + const int height = frame->height; + const float intensity = s->intensity; + const float gintensity = intensity * s->balance[0]; + const float bintensity = intensity * s->balance[1]; + const float rintensity = intensity * s->balance[2]; + const int slice_start = (height * jobnr) / nb_jobs; + const int slice_end = (height * (jobnr + 1)) / nb_jobs; + const int glinesize = frame->linesize[0] / 2; + const int blinesize = frame->linesize[1] / 2; + const int rlinesize = frame->linesize[2] / 2; + uint16_t *gptr = (uint16_t *)frame->data[0] + slice_start * glinesize; + uint16_t *bptr = (uint16_t *)frame->data[1] + slice_start * blinesize; + uint16_t *rptr = (uint16_t *)frame->data[2] + slice_start * rlinesize; + + for (int y = slice_start; y < slice_end; y++) { + for (int x = 0; x < width; x++) { + float g = gptr[x] / max; + float b = bptr[x] / max; + float r = rptr[x] / max; + float max_color = FFMAX3(r, g, b); + float min_color = FFMIN3(r, g, b); + float color_saturation = max_color - min_color; + float luma = g * gc + r * rc + b * bc; + const float cg = 1.f + gintensity * (1.f - 
FFSIGN(gintensity) * color_saturation); + const float cb = 1.f + bintensity * (1.f - FFSIGN(bintensity) * color_saturation); + const float cr = 1.f + rintensity * (1.f - FFSIGN(rintensity) * color_saturation); + + g = lerpf(luma, g, cg); + b = lerpf(luma, b, cb); + r = lerpf(luma, r, cr); + + gptr[x] = av_clip_uintp2_c(g * max, depth); + bptr[x] = av_clip_uintp2_c(b * max, depth); + rptr[x] = av_clip_uintp2_c(r * max, depth); + } + + gptr += glinesize; + bptr += blinesize; + rptr += rlinesize; + } + + return 0; +} + +static int filter_frame(AVFilterLink *link, AVFrame *frame) +{ + AVFilterContext *avctx = link->dst; + VibranceContext *s = avctx->priv; + int res; + + if (res = avctx->internal->execute(avctx, s->do_slice, frame, NULL, + FFMIN(frame->height, ff_filter_get_nb_threads(avctx)))) + return res; + + return ff_filter_frame(avctx->outputs[0], frame); +} + +static av_cold int query_formats(AVFilterContext *avctx) +{ + static const enum AVPixelFormat pixel_fmts[] = { + AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, + AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, + AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16, + AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16, + AV_PIX_FMT_NONE + }; + + AVFilterFormats *formats = NULL; + + formats = ff_make_format_list(pixel_fmts); + if (!formats) + return AVERROR(ENOMEM); + + return ff_set_common_formats(avctx, formats); +} + +static av_cold int config_input(AVFilterLink *inlink) +{ + AVFilterContext *avctx = inlink->dst; + VibranceContext *s = avctx->priv; + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); + + s->depth = desc->comp[0].depth; + s->do_slice = s->depth <= 8 ? 
vibrance_slice8 : vibrance_slice16; + + return 0; +} + +static const AVFilterPad vibrance_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .needs_writable = 1, + .filter_frame = filter_frame, + .config_props = config_input, + }, + { NULL } +}; + +static const AVFilterPad vibrance_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + }, + { NULL } +}; + +#define OFFSET(x) offsetof(VibranceContext, x) +#define VF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM + +static const AVOption vibrance_options[] = { + { "intensity", "set the intensity value", OFFSET(intensity), AV_OPT_TYPE_FLOAT, {.dbl=0}, -2, 2, VF }, + { "rbal", "set the red balance value", OFFSET(balance[2]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF }, + { "gbal", "set the green balance value", OFFSET(balance[0]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF }, + { "bbal", "set the blue balance value", OFFSET(balance[1]), AV_OPT_TYPE_FLOAT, {.dbl=1}, -10, 10, VF }, + { "rlum", "set the red luma coefficient", OFFSET(lcoeffs[2]), AV_OPT_TYPE_FLOAT, {.dbl=0.072186}, 0, 1, VF }, + { "glum", "set the green luma coefficient", OFFSET(lcoeffs[0]), AV_OPT_TYPE_FLOAT, {.dbl=0.715158}, 0, 1, VF }, + { "blum", "set the blue luma coefficient", OFFSET(lcoeffs[1]), AV_OPT_TYPE_FLOAT, {.dbl=0.212656}, 0, 1, VF }, + { NULL } +}; + +AVFILTER_DEFINE_CLASS(vibrance); + +AVFilter ff_vf_vibrance = { + .name = "vibrance", + .description = NULL_IF_CONFIG_SMALL("Boost or alter saturation."), + .priv_size = sizeof(VibranceContext), + .priv_class = &vibrance_class, + .query_formats = query_formats, + .inputs = vibrance_inputs, + .outputs = vibrance_outputs, + .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS, +}; diff --git a/libavfilter/vf_waveform.c b/libavfilter/vf_waveform.c index bcee57cf3b056..80336284070e6 100644 --- a/libavfilter/vf_waveform.c +++ b/libavfilter/vf_waveform.c @@ -102,6 +102,7 @@ typedef struct WaveformContext { int shift_w[4], shift_h[4]; 
GraticuleLines *glines; int nb_glines; + int rgb; int (*waveform_slice)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs); @@ -2610,17 +2611,18 @@ static void graticule_row(WaveformContext *s, AVFrame *out) const float o1 = s->opacity; const float o2 = 1. - o1; const int height = s->display == PARADE ? out->height / s->acomp : out->height; - int k = 0, c, p, l, offset_x = 0, offset_y = 0; + int C, k = 0, c, p, l, offset_x = 0, offset_y = 0; for (c = 0; c < s->ncomp; c++) { if (!((1 << c) & s->pcomp) || (!s->display && k > 0)) continue; k++; + C = s->rgb ? 0 : c; for (p = 0; p < s->ncomp; p++) { const int v = s->grat_yuva_color[p]; for (l = 0; l < s->nb_glines; l++) { - const uint16_t pos = s->glines[l].line[c].pos; + const uint16_t pos = s->glines[l].line[C].pos; int x = offset_x + (s->mirror ? s->size - 1 - pos : pos); uint8_t *dst = out->data[p] + offset_y * out->linesize[p] + x; @@ -2629,8 +2631,8 @@ static void graticule_row(WaveformContext *s, AVFrame *out) } for (l = 0; l < s->nb_glines && (s->flags & 1); l++) { - const char *name = s->glines[l].line[c].name; - const uint16_t pos = s->glines[l].line[c].pos; + const char *name = s->glines[l].line[C].name; + const uint16_t pos = s->glines[l].line[C].pos; int x = offset_x + (s->mirror ? s->size - 1 - pos : pos) - 10; if (x < 0) @@ -2651,17 +2653,18 @@ static void graticule16_row(WaveformContext *s, AVFrame *out) const float o2 = 1. - o1; const int mult = s->max / 256; const int height = s->display == PARADE ? out->height / s->acomp : out->height; - int k = 0, c, p, l, offset_x = 0, offset_y = 0; + int C, k = 0, c, p, l, offset_x = 0, offset_y = 0; for (c = 0; c < s->ncomp; c++) { if (!((1 << c) & s->pcomp) || (!s->display && k > 0)) continue; k++; + C = s->rgb ? 
0 : c; for (p = 0; p < s->ncomp; p++) { const int v = s->grat_yuva_color[p] * mult; for (l = 0; l < s->nb_glines ; l++) { - const uint16_t pos = s->glines[l].line[c].pos; + const uint16_t pos = s->glines[l].line[C].pos; int x = offset_x + (s->mirror ? s->size - 1 - pos : pos); uint16_t *dst = (uint16_t *)(out->data[p] + offset_y * out->linesize[p]) + x; @@ -2670,8 +2673,8 @@ static void graticule16_row(WaveformContext *s, AVFrame *out) } for (l = 0; l < s->nb_glines && (s->flags & 1); l++) { - const char *name = s->glines[l].line[c].name; - const uint16_t pos = s->glines[l].line[c].pos; + const char *name = s->glines[l].line[C].name; + const uint16_t pos = s->glines[l].line[C].pos; int x = offset_x + (s->mirror ? s->size - 1 - pos : pos) - 10; if (x < 0) @@ -2691,17 +2694,18 @@ static void graticule_column(WaveformContext *s, AVFrame *out) const float o1 = s->opacity; const float o2 = 1. - o1; const int width = s->display == PARADE ? out->width / s->acomp : out->width; - int k = 0, c, p, l, offset_y = 0, offset_x = 0; + int C, k = 0, c, p, l, offset_y = 0, offset_x = 0; for (c = 0; c < s->ncomp; c++) { if ((!((1 << c) & s->pcomp) || (!s->display && k > 0))) continue; k++; + C = s->rgb ? 0 : c; for (p = 0; p < s->ncomp; p++) { const int v = s->grat_yuva_color[p]; for (l = 0; l < s->nb_glines ; l++) { - const uint16_t pos = s->glines[l].line[c].pos; + const uint16_t pos = s->glines[l].line[C].pos; int y = offset_y + (s->mirror ? s->size - 1 - pos : pos); uint8_t *dst = out->data[p] + y * out->linesize[p] + offset_x; @@ -2710,8 +2714,8 @@ static void graticule_column(WaveformContext *s, AVFrame *out) } for (l = 0; l < s->nb_glines && (s->flags & 1); l++) { - const char *name = s->glines[l].line[c].name; - const uint16_t pos = s->glines[l].line[c].pos; + const char *name = s->glines[l].line[C].name; + const uint16_t pos = s->glines[l].line[C].pos; int y = offset_y + (s->mirror ? 
s->size - 1 - pos : pos) - 10; if (y < 0) @@ -2732,17 +2736,18 @@ static void graticule16_column(WaveformContext *s, AVFrame *out) const float o2 = 1. - o1; const int mult = s->max / 256; const int width = s->display == PARADE ? out->width / s->acomp : out->width; - int k = 0, c, p, l, offset_x = 0, offset_y = 0; + int C, k = 0, c, p, l, offset_x = 0, offset_y = 0; for (c = 0; c < s->ncomp; c++) { if ((!((1 << c) & s->pcomp) || (!s->display && k > 0))) continue; k++; + C = s->rgb ? 0 : c; for (p = 0; p < s->ncomp; p++) { const int v = s->grat_yuva_color[p] * mult; for (l = 0; l < s->nb_glines ; l++) { - const uint16_t pos = s->glines[l].line[c].pos; + const uint16_t pos = s->glines[l].line[C].pos; int y = offset_y + (s->mirror ? s->size - 1 - pos : pos); uint16_t *dst = (uint16_t *)(out->data[p] + y * out->linesize[p]) + offset_x; @@ -2751,8 +2756,8 @@ static void graticule16_column(WaveformContext *s, AVFrame *out) } for (l = 0; l < s->nb_glines && (s->flags & 1); l++) { - const char *name = s->glines[l].line[c].name; - const uint16_t pos = s->glines[l].line[c].pos; + const char *name = s->glines[l].line[C].name; + const uint16_t pos = s->glines[l].line[C].pos; int y = offset_y + (s->mirror ? 
s->size - 1 - pos: pos) - 10; if (y < 0) @@ -2996,8 +3001,8 @@ static int config_input(AVFilterLink *inlink) case AV_PIX_FMT_GBRP9: case AV_PIX_FMT_GBRP10: case AV_PIX_FMT_GBRP12: + s->rgb = 1; memcpy(s->bg_color, black_gbrp_color, sizeof(s->bg_color)); - s->graticulef = graticule_none; break; default: memcpy(s->bg_color, black_yuva_color, sizeof(s->bg_color)); @@ -3020,6 +3025,9 @@ static int config_output(AVFilterLink *outlink) comp++; } s->acomp = comp; + if (s->acomp == 0) + return AVERROR(EINVAL); + s->odesc = av_pix_fmt_desc_get(outlink->format); s->dcomp = s->odesc->nb_components; diff --git a/libavfilter/vf_yadif.c b/libavfilter/vf_yadif.c index f58d8ac2bce32..3107924932a51 100644 --- a/libavfilter/vf_yadif.c +++ b/libavfilter/vf_yadif.c @@ -22,7 +22,6 @@ #include "libavutil/avassert.h" #include "libavutil/cpu.h" #include "libavutil/common.h" -#include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "libavutil/imgutils.h" #include "avfilter.h" @@ -254,166 +253,6 @@ static void filter(AVFilterContext *ctx, AVFrame *dstpic, emms_c(); } -static int return_frame(AVFilterContext *ctx, int is_second) -{ - YADIFContext *yadif = ctx->priv; - AVFilterLink *link = ctx->outputs[0]; - int tff, ret; - - if (yadif->parity == -1) { - tff = yadif->cur->interlaced_frame ? 
- yadif->cur->top_field_first : 1; - } else { - tff = yadif->parity ^ 1; - } - - if (is_second) { - yadif->out = ff_get_video_buffer(link, link->w, link->h); - if (!yadif->out) - return AVERROR(ENOMEM); - - av_frame_copy_props(yadif->out, yadif->cur); - yadif->out->interlaced_frame = 0; - } - - filter(ctx, yadif->out, tff ^ !is_second, tff); - - if (is_second) { - int64_t cur_pts = yadif->cur->pts; - int64_t next_pts = yadif->next->pts; - - if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) { - yadif->out->pts = cur_pts + next_pts; - } else { - yadif->out->pts = AV_NOPTS_VALUE; - } - } - ret = ff_filter_frame(ctx->outputs[0], yadif->out); - - yadif->frame_pending = (yadif->mode&1) && !is_second; - return ret; -} - -static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b) -{ - int i; - for (i = 0; i < yadif->csp->nb_components; i++) - if (a->linesize[i] != b->linesize[i]) - return 1; - return 0; -} - -static void fixstride(AVFilterLink *link, AVFrame *f) -{ - AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height); - if(!dst) - return; - av_frame_copy_props(dst, f); - av_image_copy(dst->data, dst->linesize, - (const uint8_t **)f->data, f->linesize, - dst->format, dst->width, dst->height); - av_frame_unref(f); - av_frame_move_ref(f, dst); - av_frame_free(&dst); -} - -static int filter_frame(AVFilterLink *link, AVFrame *frame) -{ - AVFilterContext *ctx = link->dst; - YADIFContext *yadif = ctx->priv; - - av_assert0(frame); - - if (yadif->frame_pending) - return_frame(ctx, 1); - - if (yadif->prev) - av_frame_free(&yadif->prev); - yadif->prev = yadif->cur; - yadif->cur = yadif->next; - yadif->next = frame; - - if (!yadif->cur && - !(yadif->cur = av_frame_clone(yadif->next))) - return AVERROR(ENOMEM); - - if (checkstride(yadif, yadif->next, yadif->cur)) { - av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n"); - fixstride(link, yadif->next); - } - if (checkstride(yadif, yadif->next, yadif->cur)) - 
fixstride(link, yadif->cur); - if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev)) - fixstride(link, yadif->prev); - if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) { - av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n"); - return -1; - } - - if (!yadif->prev) - return 0; - - if ((yadif->deint && !yadif->cur->interlaced_frame) || - ctx->is_disabled || - (yadif->deint && !yadif->prev->interlaced_frame && yadif->prev->repeat_pict) || - (yadif->deint && !yadif->next->interlaced_frame && yadif->next->repeat_pict) - ) { - yadif->out = av_frame_clone(yadif->cur); - if (!yadif->out) - return AVERROR(ENOMEM); - - av_frame_free(&yadif->prev); - if (yadif->out->pts != AV_NOPTS_VALUE) - yadif->out->pts *= 2; - return ff_filter_frame(ctx->outputs[0], yadif->out); - } - - yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h); - if (!yadif->out) - return AVERROR(ENOMEM); - - av_frame_copy_props(yadif->out, yadif->cur); - yadif->out->interlaced_frame = 0; - - if (yadif->out->pts != AV_NOPTS_VALUE) - yadif->out->pts *= 2; - - return return_frame(ctx, 0); -} - -static int request_frame(AVFilterLink *link) -{ - AVFilterContext *ctx = link->src; - YADIFContext *yadif = ctx->priv; - int ret; - - if (yadif->frame_pending) { - return_frame(ctx, 1); - return 0; - } - - if (yadif->eof) - return AVERROR_EOF; - - ret = ff_request_frame(ctx->inputs[0]); - - if (ret == AVERROR_EOF && yadif->cur) { - AVFrame *next = av_frame_clone(yadif->next); - - if (!next) - return AVERROR(ENOMEM); - - next->pts = yadif->next->pts * 2 - yadif->cur->pts; - - filter_frame(ctx->inputs[0], next); - yadif->eof = 1; - } else if (ret < 0) { - return ret; - } - - return 0; -} - static av_cold void uninit(AVFilterContext *ctx) { YADIFContext *yadif = ctx->priv; @@ -492,6 +331,7 @@ static int config_props(AVFilterLink *link) } s->csp = av_pix_fmt_desc_get(link->format); + s->filter = filter; if (s->csp->comp[0].depth > 
8) { s->filter_line = filter_line_c_16bit; s->filter_edges = filter_edges_16bit; @@ -507,37 +347,19 @@ static int config_props(AVFilterLink *link) } -#define OFFSET(x) offsetof(YADIFContext, x) -#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM - -#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit } - -static const AVOption yadif_options[] = { - { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"}, - CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"), - CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"), - CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"), - CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"), - - { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" }, - CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"), - CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"), - CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"), - - { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" }, - CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"), - CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"), - - { NULL } +static const AVClass yadif_class = { + .class_name = "yadif", + .item_name = av_default_item_name, + .option = ff_yadif_options, + .version = LIBAVUTIL_VERSION_INT, + .category = AV_CLASS_CATEGORY_FILTER, }; -AVFILTER_DEFINE_CLASS(yadif); - static const AVFilterPad avfilter_vf_yadif_inputs[] = { { .name = 
"default", .type = AVMEDIA_TYPE_VIDEO, - .filter_frame = filter_frame, + .filter_frame = ff_yadif_filter_frame, }, { NULL } }; @@ -546,7 +368,7 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = { { .name = "default", .type = AVMEDIA_TYPE_VIDEO, - .request_frame = request_frame, + .request_frame = ff_yadif_request_frame, .config_props = config_props, }, { NULL } diff --git a/libavfilter/vf_yadif_cuda.c b/libavfilter/vf_yadif_cuda.c new file mode 100644 index 0000000000000..be22344d9de22 --- /dev/null +++ b/libavfilter/vf_yadif_cuda.c @@ -0,0 +1,426 @@ +/* + * Copyright (C) 2018 Philip Langdale + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include +#include "libavutil/avassert.h" +#include "libavutil/hwcontext_cuda.h" +#include "internal.h" +#include "yadif.h" + +extern char vf_yadif_cuda_ptx[]; + +typedef struct DeintCUDAContext { + YADIFContext yadif; + + AVCUDADeviceContext *hwctx; + AVBufferRef *device_ref; + AVBufferRef *input_frames_ref; + AVHWFramesContext *input_frames; + + CUcontext cu_ctx; + CUstream stream; + CUmodule cu_module; + CUfunction cu_func_uchar; + CUfunction cu_func_uchar2; + CUfunction cu_func_ushort; + CUfunction cu_func_ushort2; +} DeintCUDAContext; + +#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) ) +#define ALIGN_UP(a, b) (((a) + (b) - 1) & ~((b) - 1)) +#define BLOCKX 32 +#define BLOCKY 16 + +static int check_cu(AVFilterContext *avctx, CUresult err, const char *func) +{ + const char *err_name; + const char *err_string; + + av_log(avctx, AV_LOG_TRACE, "Calling %s\n", func); + + if (err == CUDA_SUCCESS) + return 0; + + cuGetErrorName(err, &err_name); + cuGetErrorString(err, &err_string); + + av_log(avctx, AV_LOG_ERROR, "%s failed", func); + if (err_name && err_string) + av_log(avctx, AV_LOG_ERROR, " -> %s: %s", err_name, err_string); + av_log(avctx, AV_LOG_ERROR, "\n"); + + return AVERROR_EXTERNAL; +} + +#define CHECK_CU(x) check_cu(ctx, (x), #x) + +static CUresult call_kernel(AVFilterContext *ctx, CUfunction func, + CUdeviceptr prev, CUdeviceptr cur, CUdeviceptr next, + CUarray_format format, int channels, + int src_width, // Width is pixels per channel + int src_height, // Height is pixels per channel + int src_pitch, // Pitch is bytes + CUdeviceptr dst, + int dst_width, // Width is pixels per channel + int dst_height, // Height is pixels per channel + int dst_pitch, // Pitch is pixels per channel + int parity, int tff) +{ + DeintCUDAContext *s = 
ctx->priv; + CUtexObject tex_prev = 0, tex_cur = 0, tex_next = 0; + CUresult err; + int skip_spatial_check = s->yadif.mode&2; + + void *args[] = { &dst, &tex_prev, &tex_cur, &tex_next, + &dst_width, &dst_height, &dst_pitch, + &src_width, &src_height, &parity, &tff, + &skip_spatial_check }; + + CUDA_TEXTURE_DESC tex_desc = { + .filterMode = CU_TR_FILTER_MODE_POINT, + .flags = CU_TRSF_READ_AS_INTEGER, + }; + + CUDA_RESOURCE_DESC res_desc = { + .resType = CU_RESOURCE_TYPE_PITCH2D, + .res.pitch2D.format = format, + .res.pitch2D.numChannels = channels, + .res.pitch2D.width = src_width, + .res.pitch2D.height = src_height, + .res.pitch2D.pitchInBytes = src_pitch, + }; + + res_desc.res.pitch2D.devPtr = (CUdeviceptr)prev; + err = CHECK_CU(cuTexObjectCreate(&tex_prev, &res_desc, &tex_desc, NULL)); + if (err != CUDA_SUCCESS) { + goto exit; + } + + res_desc.res.pitch2D.devPtr = (CUdeviceptr)cur; + err = CHECK_CU(cuTexObjectCreate(&tex_cur, &res_desc, &tex_desc, NULL)); + if (err != CUDA_SUCCESS) { + goto exit; + } + + res_desc.res.pitch2D.devPtr = (CUdeviceptr)next; + err = CHECK_CU(cuTexObjectCreate(&tex_next, &res_desc, &tex_desc, NULL)); + if (err != CUDA_SUCCESS) { + goto exit; + } + + err = CHECK_CU(cuLaunchKernel(func, + DIV_UP(dst_width, BLOCKX), DIV_UP(dst_height, BLOCKY), 1, + BLOCKX, BLOCKY, 1, + 0, s->stream, args, NULL)); + +exit: + if (tex_prev) + CHECK_CU(cuTexObjectDestroy(tex_prev)); + if (tex_cur) + CHECK_CU(cuTexObjectDestroy(tex_cur)); + if (tex_next) + CHECK_CU(cuTexObjectDestroy(tex_next)); + + return err; +} + +static void filter(AVFilterContext *ctx, AVFrame *dst, + int parity, int tff) +{ + DeintCUDAContext *s = ctx->priv; + YADIFContext *y = &s->yadif; + CUcontext dummy; + CUresult err; + int i; + + err = CHECK_CU(cuCtxPushCurrent(s->cu_ctx)); + if (err != CUDA_SUCCESS) { + goto exit; + } + + for (i = 0; i < y->csp->nb_components; i++) { + CUfunction func; + CUarray_format format; + int pixel_size, channels; + const AVComponentDescriptor *comp = 
&y->csp->comp[i]; + + if (comp->plane < i) { + // We process planes as a whole, so don't reprocess + // them for additional components + continue; + } + + pixel_size = (comp->depth + comp->shift) / 8; + channels = comp->step / pixel_size; + if (pixel_size > 2 || channels > 2) { + av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name); + goto exit; + } + switch (pixel_size) { + case 1: + func = channels == 1 ? s->cu_func_uchar : s->cu_func_uchar2; + format = CU_AD_FORMAT_UNSIGNED_INT8; + break; + case 2: + func = channels == 1 ? s->cu_func_ushort : s->cu_func_ushort2; + format = CU_AD_FORMAT_UNSIGNED_INT16; + break; + default: + av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name); + goto exit; + } + av_log(ctx, AV_LOG_TRACE, + "Deinterlacing plane %d: pixel_size: %d channels: %d\n", + comp->plane, pixel_size, channels); + call_kernel(ctx, func, + (CUdeviceptr)y->prev->data[i], + (CUdeviceptr)y->cur->data[i], + (CUdeviceptr)y->next->data[i], + format, channels, + AV_CEIL_RSHIFT(y->cur->width, i ? y->csp->log2_chroma_w : 0), + AV_CEIL_RSHIFT(y->cur->height, i ? y->csp->log2_chroma_h : 0), + y->cur->linesize[i], + (CUdeviceptr)dst->data[i], + AV_CEIL_RSHIFT(dst->width, i ? y->csp->log2_chroma_w : 0), + AV_CEIL_RSHIFT(dst->height, i ? 
y->csp->log2_chroma_h : 0), + dst->linesize[i] / comp->step, + parity, tff); + } + + err = CHECK_CU(cuStreamSynchronize(s->stream)); + if (err != CUDA_SUCCESS) { + goto exit; + } + +exit: + CHECK_CU(cuCtxPopCurrent(&dummy)); + return; +} + +static av_cold void deint_cuda_uninit(AVFilterContext *ctx) +{ + CUcontext dummy; + DeintCUDAContext *s = ctx->priv; + YADIFContext *y = &s->yadif; + + if (s->cu_module) { + CHECK_CU(cuCtxPushCurrent(s->cu_ctx)); + CHECK_CU(cuModuleUnload(s->cu_module)); + CHECK_CU(cuCtxPopCurrent(&dummy)); + } + + av_frame_free(&y->prev); + av_frame_free(&y->cur); + av_frame_free(&y->next); + + av_buffer_unref(&s->device_ref); + s->hwctx = NULL; + av_buffer_unref(&s->input_frames_ref); + s->input_frames = NULL; +} + +static int deint_cuda_query_formats(AVFilterContext *ctx) +{ + enum AVPixelFormat pix_fmts[] = { + AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE, + }; + int ret; + + if ((ret = ff_formats_ref(ff_make_format_list(pix_fmts), + &ctx->inputs[0]->out_formats)) < 0) + return ret; + if ((ret = ff_formats_ref(ff_make_format_list(pix_fmts), + &ctx->outputs[0]->in_formats)) < 0) + return ret; + + return 0; +} + +static int config_input(AVFilterLink *inlink) +{ + AVFilterContext *ctx = inlink->dst; + DeintCUDAContext *s = ctx->priv; + + if (!inlink->hw_frames_ctx) { + av_log(ctx, AV_LOG_ERROR, "A hardware frames reference is " + "required to associate the processing device.\n"); + return AVERROR(EINVAL); + } + + s->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx); + if (!s->input_frames_ref) { + av_log(ctx, AV_LOG_ERROR, "A input frames reference create " + "failed.\n"); + return AVERROR(ENOMEM); + } + s->input_frames = (AVHWFramesContext*)s->input_frames_ref->data; + + return 0; +} + +static int config_output(AVFilterLink *link) +{ + AVHWFramesContext *output_frames; + AVFilterContext *ctx = link->src; + DeintCUDAContext *s = ctx->priv; + YADIFContext *y = &s->yadif; + int ret = 0; + CUcontext dummy; + CUresult err; + + 
av_assert0(s->input_frames); + s->device_ref = av_buffer_ref(s->input_frames->device_ref); + if (!s->device_ref) { + av_log(ctx, AV_LOG_ERROR, "A device reference create " + "failed.\n"); + return AVERROR(ENOMEM); + } + s->hwctx = ((AVHWDeviceContext*)s->device_ref->data)->hwctx; + s->cu_ctx = s->hwctx->cuda_ctx; + s->stream = s->hwctx->stream; + + link->hw_frames_ctx = av_hwframe_ctx_alloc(s->device_ref); + if (!link->hw_frames_ctx) { + av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context " + "for output.\n"); + ret = AVERROR(ENOMEM); + goto exit; + } + + output_frames = (AVHWFramesContext*)link->hw_frames_ctx->data; + + output_frames->format = AV_PIX_FMT_CUDA; + output_frames->sw_format = s->input_frames->sw_format; + output_frames->width = ctx->inputs[0]->w; + output_frames->height = ctx->inputs[0]->h; + + output_frames->initial_pool_size = 4; + + ret = ff_filter_init_hw_frames(ctx, link, 10); + if (ret < 0) + goto exit; + + ret = av_hwframe_ctx_init(link->hw_frames_ctx); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to initialise CUDA frame " + "context for output: %d\n", ret); + goto exit; + } + + link->time_base.num = ctx->inputs[0]->time_base.num; + link->time_base.den = ctx->inputs[0]->time_base.den * 2; + link->w = ctx->inputs[0]->w; + link->h = ctx->inputs[0]->h; + + if(y->mode & 1) + link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate, + (AVRational){2, 1}); + + if (link->w < 3 || link->h < 3) { + av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n"); + ret = AVERROR(EINVAL); + goto exit; + } + + y->csp = av_pix_fmt_desc_get(output_frames->sw_format); + y->filter = filter; + + err = CHECK_CU(cuCtxPushCurrent(s->cu_ctx)); + if (err != CUDA_SUCCESS) { + ret = AVERROR_EXTERNAL; + goto exit; + } + + err = CHECK_CU(cuModuleLoadData(&s->cu_module, vf_yadif_cuda_ptx)); + if (err != CUDA_SUCCESS) { + ret = AVERROR_INVALIDDATA; + goto exit; + } + + err = CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar, 
s->cu_module, "yadif_uchar")); + if (err != CUDA_SUCCESS) { + ret = AVERROR_INVALIDDATA; + goto exit; + } + + err = CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar2, s->cu_module, "yadif_uchar2")); + if (err != CUDA_SUCCESS) { + ret = AVERROR_INVALIDDATA; + goto exit; + } + + err= CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort, s->cu_module, "yadif_ushort")); + if (err != CUDA_SUCCESS) { + ret = AVERROR_INVALIDDATA; + goto exit; + } + + err = CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort2, s->cu_module, "yadif_ushort2")); + if (err != CUDA_SUCCESS) { + ret = AVERROR_INVALIDDATA; + goto exit; + } + +exit: + CHECK_CU(cuCtxPopCurrent(&dummy)); + + return ret; +} + +static const AVClass yadif_cuda_class = { + .class_name = "yadif_cuda", + .item_name = av_default_item_name, + .option = ff_yadif_options, + .version = LIBAVUTIL_VERSION_INT, + .category = AV_CLASS_CATEGORY_FILTER, +}; + +static const AVFilterPad deint_cuda_inputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .filter_frame = ff_yadif_filter_frame, + .config_props = config_input, + }, + { NULL } +}; + +static const AVFilterPad deint_cuda_outputs[] = { + { + .name = "default", + .type = AVMEDIA_TYPE_VIDEO, + .request_frame = ff_yadif_request_frame, + .config_props = config_output, + }, + { NULL } +}; + +AVFilter ff_vf_yadif_cuda = { + .name = "yadif_cuda", + .description = NULL_IF_CONFIG_SMALL("Deinterlace CUDA frames"), + .priv_size = sizeof(DeintCUDAContext), + .priv_class = &yadif_cuda_class, + .uninit = deint_cuda_uninit, + .query_formats = deint_cuda_query_formats, + .inputs = deint_cuda_inputs, + .outputs = deint_cuda_outputs, + .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL, + .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, +}; diff --git a/libavfilter/vf_yadif_cuda.cu b/libavfilter/vf_yadif_cuda.cu new file mode 100644 index 0000000000000..12e7e4a443cea --- /dev/null +++ b/libavfilter/vf_yadif_cuda.cu @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2018 Philip Langdale + * + * This file 
is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +template +__inline__ __device__ T spatial_predictor(T a, T b, T c, T d, T e, T f, T g, + T h, T i, T j, T k, T l, T m, T n) +{ + int spatial_pred = (d + k)/2; + int spatial_score = abs(c - j) + abs(d - k) + abs(e - l); + + int score = abs(b - k) + abs(c - l) + abs(d - m); + if (score < spatial_score) { + spatial_pred = (c + l)/2; + spatial_score = score; + score = abs(a - l) + abs(b - m) + abs(c - n); + if (score < spatial_score) { + spatial_pred = (b + m)/2; + spatial_score = score; + } + } + score = abs(d - i) + abs(e - j) + abs(f - k); + if (score < spatial_score) { + spatial_pred = (e + j)/2; + spatial_score = score; + score = abs(e - h) + abs(f - i) + abs(g - j); + if (score < spatial_score) { + spatial_pred = (f + i)/2; + spatial_score = score; + } + } + return spatial_pred; +} + +__inline__ __device__ int max3(int a, int b, int c) +{ + int x = max(a, b); + return max(x, c); +} + +__inline__ __device__ int min3(int a, int b, int c) +{ + int x = min(a, b); + return min(x, c); +} + +template +__inline__ __device__ T temporal_predictor(T A, T B, T C, T D, T E, T F, + T G, T H, T I, T J, T K, T L, + T spatial_pred, bool skip_check) +{ + int p0 = (C + H) / 2; + int p1 = F; + int p2 = (D + I) / 2; + int p3 = G; + 
int p4 = (E + J) / 2; + + int tdiff0 = abs(D - I); + int tdiff1 = (abs(A - F) + abs(B - G)) / 2; + int tdiff2 = (abs(K - F) + abs(G - L)) / 2; + + int diff = max3(tdiff0, tdiff1, tdiff2); + + if (!skip_check) { + int maxi = max3(p2 - p3, p2 - p1, min(p0 - p1, p4 - p3)); + int mini = min3(p2 - p3, p2 - p1, max(p0 - p1, p4 - p3)); + diff = max3(diff, mini, -maxi); + } + + if (spatial_pred > p2 + diff) { + spatial_pred = p2 + diff; + } + if (spatial_pred < p2 - diff) { + spatial_pred = p2 - diff; + } + + return spatial_pred; +} + +template +__inline__ __device__ void yadif_single(T *dst, + cudaTextureObject_t prev, + cudaTextureObject_t cur, + cudaTextureObject_t next, + int dst_width, int dst_height, int dst_pitch, + int src_width, int src_height, + int parity, int tff, bool skip_spatial_check) +{ + // Identify location + int xo = blockIdx.x * blockDim.x + threadIdx.x; + int yo = blockIdx.y * blockDim.y + threadIdx.y; + + if (xo >= dst_width || yo >= dst_height) { + return; + } + + // Don't modify the primary field + if (yo % 2 == parity) { + dst[yo*dst_pitch+xo] = tex2D(cur, xo, yo); + return; + } + + // Calculate spatial prediction + T a = tex2D(cur, xo - 3, yo - 1); + T b = tex2D(cur, xo - 2, yo - 1); + T c = tex2D(cur, xo - 1, yo - 1); + T d = tex2D(cur, xo - 0, yo - 1); + T e = tex2D(cur, xo + 1, yo - 1); + T f = tex2D(cur, xo + 2, yo - 1); + T g = tex2D(cur, xo + 3, yo - 1); + + T h = tex2D(cur, xo - 3, yo + 1); + T i = tex2D(cur, xo - 2, yo + 1); + T j = tex2D(cur, xo - 1, yo + 1); + T k = tex2D(cur, xo - 0, yo + 1); + T l = tex2D(cur, xo + 1, yo + 1); + T m = tex2D(cur, xo + 2, yo + 1); + T n = tex2D(cur, xo + 3, yo + 1); + + T spatial_pred = + spatial_predictor(a, b, c, d, e, f, g, h, i, j, k, l, m, n); + + // Calculate temporal prediction + int is_second_field = !(parity ^ tff); + + cudaTextureObject_t prev2 = prev; + cudaTextureObject_t prev1 = is_second_field ? cur : prev; + cudaTextureObject_t next1 = is_second_field ? 
next : cur; + cudaTextureObject_t next2 = next; + + T A = tex2D(prev2, xo, yo - 1); + T B = tex2D(prev2, xo, yo + 1); + T C = tex2D(prev1, xo, yo - 2); + T D = tex2D(prev1, xo, yo + 0); + T E = tex2D(prev1, xo, yo + 2); + T F = tex2D(cur, xo, yo - 1); + T G = tex2D(cur, xo, yo + 1); + T H = tex2D(next1, xo, yo - 2); + T I = tex2D(next1, xo, yo + 0); + T J = tex2D(next1, xo, yo + 2); + T K = tex2D(next2, xo, yo - 1); + T L = tex2D(next2, xo, yo + 1); + + spatial_pred = temporal_predictor(A, B, C, D, E, F, G, H, I, J, K, L, + spatial_pred, skip_spatial_check); + + dst[yo*dst_pitch+xo] = spatial_pred; +} + +template +__inline__ __device__ void yadif_double(T *dst, + cudaTextureObject_t prev, + cudaTextureObject_t cur, + cudaTextureObject_t next, + int dst_width, int dst_height, int dst_pitch, + int src_width, int src_height, + int parity, int tff, bool skip_spatial_check) +{ + int xo = blockIdx.x * blockDim.x + threadIdx.x; + int yo = blockIdx.y * blockDim.y + threadIdx.y; + + if (xo >= dst_width || yo >= dst_height) { + return; + } + + if (yo % 2 == parity) { + // Don't modify the primary field + dst[yo*dst_pitch+xo] = tex2D(cur, xo, yo); + return; + } + + T a = tex2D(cur, xo - 3, yo - 1); + T b = tex2D(cur, xo - 2, yo - 1); + T c = tex2D(cur, xo - 1, yo - 1); + T d = tex2D(cur, xo - 0, yo - 1); + T e = tex2D(cur, xo + 1, yo - 1); + T f = tex2D(cur, xo + 2, yo - 1); + T g = tex2D(cur, xo + 3, yo - 1); + + T h = tex2D(cur, xo - 3, yo + 1); + T i = tex2D(cur, xo - 2, yo + 1); + T j = tex2D(cur, xo - 1, yo + 1); + T k = tex2D(cur, xo - 0, yo + 1); + T l = tex2D(cur, xo + 1, yo + 1); + T m = tex2D(cur, xo + 2, yo + 1); + T n = tex2D(cur, xo + 3, yo + 1); + + T spatial_pred; + spatial_pred.x = + spatial_predictor(a.x, b.x, c.x, d.x, e.x, f.x, g.x, h.x, i.x, j.x, k.x, l.x, m.x, n.x); + spatial_pred.y = + spatial_predictor(a.y, b.y, c.y, d.y, e.y, f.y, g.y, h.y, i.y, j.y, k.y, l.y, m.y, n.y); + + // Calculate temporal prediction + int is_second_field = !(parity ^ tff); + + 
cudaTextureObject_t prev2 = prev; + cudaTextureObject_t prev1 = is_second_field ? cur : prev; + cudaTextureObject_t next1 = is_second_field ? next : cur; + cudaTextureObject_t next2 = next; + + T A = tex2D(prev2, xo, yo - 1); + T B = tex2D(prev2, xo, yo + 1); + T C = tex2D(prev1, xo, yo - 2); + T D = tex2D(prev1, xo, yo + 0); + T E = tex2D(prev1, xo, yo + 2); + T F = tex2D(cur, xo, yo - 1); + T G = tex2D(cur, xo, yo + 1); + T H = tex2D(next1, xo, yo - 2); + T I = tex2D(next1, xo, yo + 0); + T J = tex2D(next1, xo, yo + 2); + T K = tex2D(next2, xo, yo - 1); + T L = tex2D(next2, xo, yo + 1); + + spatial_pred.x = + temporal_predictor(A.x, B.x, C.x, D.x, E.x, F.x, G.x, H.x, I.x, J.x, K.x, L.x, + spatial_pred.x, skip_spatial_check); + spatial_pred.y = + temporal_predictor(A.y, B.y, C.y, D.y, E.y, F.y, G.y, H.y, I.y, J.y, K.y, L.y, + spatial_pred.y, skip_spatial_check); + + dst[yo*dst_pitch+xo] = spatial_pred; +} + +extern "C" { + +__global__ void yadif_uchar(unsigned char *dst, + cudaTextureObject_t prev, + cudaTextureObject_t cur, + cudaTextureObject_t next, + int dst_width, int dst_height, int dst_pitch, + int src_width, int src_height, + int parity, int tff, bool skip_spatial_check) +{ + yadif_single(dst, prev, cur, next, + dst_width, dst_height, dst_pitch, + src_width, src_height, + parity, tff, skip_spatial_check); +} + +__global__ void yadif_ushort(unsigned short *dst, + cudaTextureObject_t prev, + cudaTextureObject_t cur, + cudaTextureObject_t next, + int dst_width, int dst_height, int dst_pitch, + int src_width, int src_height, + int parity, int tff, bool skip_spatial_check) +{ + yadif_single(dst, prev, cur, next, + dst_width, dst_height, dst_pitch, + src_width, src_height, + parity, tff, skip_spatial_check); +} + +__global__ void yadif_uchar2(uchar2 *dst, + cudaTextureObject_t prev, + cudaTextureObject_t cur, + cudaTextureObject_t next, + int dst_width, int dst_height, int dst_pitch, + int src_width, int src_height, + int parity, int tff, bool 
skip_spatial_check) +{ + yadif_double(dst, prev, cur, next, + dst_width, dst_height, dst_pitch, + src_width, src_height, + parity, tff, skip_spatial_check); +} + +__global__ void yadif_ushort2(ushort2 *dst, + cudaTextureObject_t prev, + cudaTextureObject_t cur, + cudaTextureObject_t next, + int dst_width, int dst_height, int dst_pitch, + int src_width, int src_height, + int parity, int tff, bool skip_spatial_check) +{ + yadif_double(dst, prev, cur, next, + dst_width, dst_height, dst_pitch, + src_width, src_height, + parity, tff, skip_spatial_check); +} + +} /* extern "C" */ diff --git a/libavfilter/window_func.h b/libavfilter/window_func.h index a94482c937942..1de8f1fbdb693 100644 --- a/libavfilter/window_func.h +++ b/libavfilter/window_func.h @@ -30,6 +30,7 @@ enum WindowFunc { WFUNC_RECT, WFUNC_HANNING, WFUNC_HAMMING, WFUNC_BLACKMAN, WFUNC_BHARRIS, WFUNC_BNUTTALL, WFUNC_SINE, WFUNC_NUTTALL, WFUNC_BHANN, WFUNC_LANCZOS, WFUNC_GAUSS, WFUNC_TUKEY, WFUNC_DOLPH, WFUNC_CAUCHY, WFUNC_PARZEN, WFUNC_POISSON, + WFUNC_BOHMAN, NB_WFUNC }; static inline void generate_window_func(float *lut, int N, int win_func, @@ -182,6 +183,14 @@ static inline void generate_window_func(float *lut, int N, int win_func, } *overlap = 0.75; break; + case WFUNC_BOHMAN: + for (n = 0; n < N; n++) { + double x = 2 * ((n / (double)(N - 1))) - 1.; + + lut[n] = (1 - fabs(x)) * cos(M_PI*fabs(x)) + 1./M_PI*sin(M_PI*fabs(x)); + } + *overlap = 0.75; + break; default: av_assert0(0); } diff --git a/libavfilter/yadif.h b/libavfilter/yadif.h index d23d1380d0089..32d6f4a0d4080 100644 --- a/libavfilter/yadif.h +++ b/libavfilter/yadif.h @@ -19,6 +19,7 @@ #ifndef AVFILTER_YADIF_H #define AVFILTER_YADIF_H +#include "libavutil/opt.h" #include "libavutil/pixdesc.h" #include "avfilter.h" @@ -54,6 +55,8 @@ typedef struct YADIFContext { AVFrame *prev; AVFrame *out; + void (*filter)(AVFilterContext *ctx, AVFrame *dstpic, int parity, int tff); + /** * Required alignment for filter_line */ @@ -71,4 +74,10 @@ typedef struct 
YADIFContext { void ff_yadif_init_x86(YADIFContext *yadif); +int ff_yadif_filter_frame(AVFilterLink *link, AVFrame *frame); + +int ff_yadif_request_frame(AVFilterLink *link); + +extern const AVOption ff_yadif_options[]; + #endif /* AVFILTER_YADIF_H */ diff --git a/libavfilter/yadif_common.c b/libavfilter/yadif_common.c new file mode 100644 index 0000000000000..19e8ac5281923 --- /dev/null +++ b/libavfilter/yadif_common.c @@ -0,0 +1,209 @@ +/* + * Copyright (C) 2006-2011 Michael Niedermayer + * 2010 James Darnley + + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/avassert.h" +#include "libavutil/imgutils.h" +#include "internal.h" +#include "yadif.h" + +static int return_frame(AVFilterContext *ctx, int is_second) +{ + YADIFContext *yadif = ctx->priv; + AVFilterLink *link = ctx->outputs[0]; + int tff, ret; + + if (yadif->parity == -1) { + tff = yadif->cur->interlaced_frame ? 
+ yadif->cur->top_field_first : 1; + } else { + tff = yadif->parity ^ 1; + } + + if (is_second) { + yadif->out = ff_get_video_buffer(link, link->w, link->h); + if (!yadif->out) + return AVERROR(ENOMEM); + + av_frame_copy_props(yadif->out, yadif->cur); + yadif->out->interlaced_frame = 0; + } + + yadif->filter(ctx, yadif->out, tff ^ !is_second, tff); + + if (is_second) { + int64_t cur_pts = yadif->cur->pts; + int64_t next_pts = yadif->next->pts; + + if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) { + yadif->out->pts = cur_pts + next_pts; + } else { + yadif->out->pts = AV_NOPTS_VALUE; + } + } + ret = ff_filter_frame(ctx->outputs[0], yadif->out); + + yadif->frame_pending = (yadif->mode&1) && !is_second; + return ret; +} + +static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b) +{ + int i; + for (i = 0; i < yadif->csp->nb_components; i++) + if (a->linesize[i] != b->linesize[i]) + return 1; + return 0; +} + +static void fixstride(AVFilterLink *link, AVFrame *f) +{ + AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height); + if(!dst) + return; + av_frame_copy_props(dst, f); + av_image_copy(dst->data, dst->linesize, + (const uint8_t **)f->data, f->linesize, + dst->format, dst->width, dst->height); + av_frame_unref(f); + av_frame_move_ref(f, dst); + av_frame_free(&dst); +} + +int ff_yadif_filter_frame(AVFilterLink *link, AVFrame *frame) +{ + AVFilterContext *ctx = link->dst; + YADIFContext *yadif = ctx->priv; + + av_assert0(frame); + + if (yadif->frame_pending) + return_frame(ctx, 1); + + if (yadif->prev) + av_frame_free(&yadif->prev); + yadif->prev = yadif->cur; + yadif->cur = yadif->next; + yadif->next = frame; + + if (!yadif->cur && + !(yadif->cur = av_frame_clone(yadif->next))) + return AVERROR(ENOMEM); + + if (checkstride(yadif, yadif->next, yadif->cur)) { + av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n"); + fixstride(link, yadif->next); + } + if (checkstride(yadif, yadif->next, 
yadif->cur)) + fixstride(link, yadif->cur); + if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev)) + fixstride(link, yadif->prev); + if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) { + av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n"); + return -1; + } + + if (!yadif->prev) + return 0; + + if ((yadif->deint && !yadif->cur->interlaced_frame) || + ctx->is_disabled || + (yadif->deint && !yadif->prev->interlaced_frame && yadif->prev->repeat_pict) || + (yadif->deint && !yadif->next->interlaced_frame && yadif->next->repeat_pict) + ) { + yadif->out = av_frame_clone(yadif->cur); + if (!yadif->out) + return AVERROR(ENOMEM); + + av_frame_free(&yadif->prev); + if (yadif->out->pts != AV_NOPTS_VALUE) + yadif->out->pts *= 2; + return ff_filter_frame(ctx->outputs[0], yadif->out); + } + + yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h); + if (!yadif->out) + return AVERROR(ENOMEM); + + av_frame_copy_props(yadif->out, yadif->cur); + yadif->out->interlaced_frame = 0; + + if (yadif->out->pts != AV_NOPTS_VALUE) + yadif->out->pts *= 2; + + return return_frame(ctx, 0); +} + +int ff_yadif_request_frame(AVFilterLink *link) +{ + AVFilterContext *ctx = link->src; + YADIFContext *yadif = ctx->priv; + int ret; + + if (yadif->frame_pending) { + return_frame(ctx, 1); + return 0; + } + + if (yadif->eof) + return AVERROR_EOF; + + ret = ff_request_frame(ctx->inputs[0]); + + if (ret == AVERROR_EOF && yadif->cur) { + AVFrame *next = av_frame_clone(yadif->next); + + if (!next) + return AVERROR(ENOMEM); + + next->pts = yadif->next->pts * 2 - yadif->cur->pts; + + ff_yadif_filter_frame(ctx->inputs[0], next); + yadif->eof = 1; + } else if (ret < 0) { + return ret; + } + + return 0; +} + +#define OFFSET(x) offsetof(YADIFContext, x) +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM + +#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, 
FLAGS, unit } + +const AVOption ff_yadif_options[] = { + { "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"}, + CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"), + CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"), + CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"), + CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"), + + { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" }, + CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"), + CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"), + CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"), + + { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" }, + CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"), + CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"), + + { NULL } +}; diff --git a/libavformat/Makefile b/libavformat/Makefile index e99e9150d586f..e4d997c4a0d28 100644 --- a/libavformat/Makefile +++ b/libavformat/Makefile @@ -411,6 +411,8 @@ OBJS-$(CONFIG_PCM_U32LE_DEMUXER) += pcmdec.o pcm.o OBJS-$(CONFIG_PCM_U32LE_MUXER) += pcmenc.o rawenc.o OBJS-$(CONFIG_PCM_U8_DEMUXER) += pcmdec.o pcm.o OBJS-$(CONFIG_PCM_U8_MUXER) += pcmenc.o rawenc.o +OBJS-$(CONFIG_PCM_VIDC_DEMUXER) += pcmdec.o pcm.o +OBJS-$(CONFIG_PCM_VIDC_MUXER) += pcmenc.o rawenc.o OBJS-$(CONFIG_PJS_DEMUXER) += pjsdec.o subtitles.o OBJS-$(CONFIG_PMP_DEMUXER) += pmpdec.o OBJS-$(CONFIG_PVA_DEMUXER) += pva.o diff --git a/libavformat/allformats.c b/libavformat/allformats.c index 
9e41718685239..498077e1dedf3 100644 --- a/libavformat/allformats.c +++ b/libavformat/allformats.c @@ -289,6 +289,8 @@ extern AVInputFormat ff_pcm_alaw_demuxer; extern AVOutputFormat ff_pcm_alaw_muxer; extern AVInputFormat ff_pcm_mulaw_demuxer; extern AVOutputFormat ff_pcm_mulaw_muxer; +extern AVInputFormat ff_pcm_vidc_demuxer; +extern AVOutputFormat ff_pcm_vidc_muxer; extern AVInputFormat ff_pcm_f64be_demuxer; extern AVOutputFormat ff_pcm_f64be_muxer; extern AVInputFormat ff_pcm_f64le_demuxer; diff --git a/libavformat/apngdec.c b/libavformat/apngdec.c index f9a97e56813e1..53cdd4538e010 100644 --- a/libavformat/apngdec.c +++ b/libavformat/apngdec.c @@ -342,6 +342,10 @@ static int apng_read_packet(AVFormatContext *s, AVPacket *pkt) len = avio_rb32(pb); tag = avio_rl32(pb); + + if (avio_feof(pb)) + return AVERROR_EOF; + switch (tag) { case MKTAG('f', 'c', 'T', 'L'): if (len != 26) diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c index 3f5f290e255f5..f8b3d106d5cfe 100644 --- a/libavformat/dashenc.c +++ b/libavformat/dashenc.c @@ -355,8 +355,11 @@ static int flush_init_segment(AVFormatContext *s, OutputStream *os) return ret; os->pos = os->init_range_length = range_length; - if (!c->single_file) - ff_format_io_close(s, &os->out); + if (!c->single_file) { + char filename[1024]; + snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile); + dashenc_io_close(s, &os->out, filename); + } return 0; } @@ -878,14 +881,14 @@ static int write_manifest(AVFormatContext *s, int final) snprintf(temp_filename, sizeof(temp_filename), use_rename ? 
"%s.tmp" : "%s", filename_hls); set_http_options(&opts, c); - ret = avio_open2(&out, temp_filename, AVIO_FLAG_WRITE, NULL, &opts); + ret = dashenc_io_open(s, &c->m3u8_out, temp_filename, &opts); if (ret < 0) { av_log(s, AV_LOG_ERROR, "Unable to open %s for writing\n", temp_filename); return ret; } av_dict_free(&opts); - ff_hls_write_playlist_version(out, 7); + ff_hls_write_playlist_version(c->m3u8_out, 7); for (i = 0; i < s->nb_streams; i++) { char playlist_file[64]; @@ -894,7 +897,7 @@ static int write_manifest(AVFormatContext *s, int final) if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) continue; get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, i); - ff_hls_write_audio_rendition(out, (char *)audio_group, + ff_hls_write_audio_rendition(c->m3u8_out, (char *)audio_group, playlist_file, i, is_default); max_audio_bitrate = FFMAX(st->codecpar->bit_rate + os->muxer_overhead, max_audio_bitrate); @@ -912,6 +915,7 @@ static int write_manifest(AVFormatContext *s, int final) AVStream *st = s->streams[i]; OutputStream *os = &c->streams[i]; char *agroup = NULL; + char *codec_str_ptr = NULL; int stream_bitrate = st->codecpar->bit_rate + os->muxer_overhead; if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) continue; @@ -922,11 +926,15 @@ static int write_manifest(AVFormatContext *s, int final) av_strlcat(codec_str, ",", sizeof(codec_str)); av_strlcat(codec_str, audio_codec_str, sizeof(codec_str)); } + if (st->codecpar->codec_id != AV_CODEC_ID_HEVC) { + codec_str_ptr = codec_str; + } get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, i); - ff_hls_write_stream_info(st, out, stream_bitrate, playlist_file, agroup, - codec_str, NULL); + ff_hls_write_stream_info(st, c->m3u8_out, stream_bitrate, + playlist_file, agroup, + codec_str_ptr, NULL); } - avio_close(out); + dashenc_io_close(s, &c->m3u8_out, temp_filename); if (use_rename) if ((ret = avpriv_io_move(temp_filename, filename_hls)) < 0) return ret; @@ -1252,7 +1260,7 @@ static void 
dashenc_delete_file(AVFormatContext *s, char *filename) { } av_dict_free(&http_opts); - dashenc_io_close(s, &out, filename); + ff_format_io_close(s, &out); } else if (unlink(filename) < 0) { av_log(s, AV_LOG_ERROR, "failed to delete %s: %s\n", filename, strerror(errno)); } diff --git a/libavformat/flv.h b/libavformat/flv.h index df5ce3d17f8c7..3571b90279c7d 100644 --- a/libavformat/flv.h +++ b/libavformat/flv.h @@ -65,6 +65,7 @@ enum FlvTagType { enum { FLV_STREAM_TYPE_VIDEO, FLV_STREAM_TYPE_AUDIO, + FLV_STREAM_TYPE_SUBTITLE, FLV_STREAM_TYPE_DATA, FLV_STREAM_TYPE_NB, }; diff --git a/libavformat/flvdec.c b/libavformat/flvdec.c index a2dea464e3697..4b9f46902bf2a 100644 --- a/libavformat/flvdec.c +++ b/libavformat/flvdec.c @@ -143,7 +143,9 @@ static AVStream *create_stream(AVFormatContext *s, int codec_type) st->codecpar->codec_type = codec_type; if (s->nb_streams>=3 ||( s->nb_streams==2 && s->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE - && s->streams[1]->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE)) + && s->streams[1]->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE + && s->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_DATA + && s->streams[1]->codecpar->codec_type != AVMEDIA_TYPE_DATA)) s->ctx_flags &= ~AVFMTCTX_NOHEADER; if (codec_type == AVMEDIA_TYPE_AUDIO) { st->codecpar->bit_rate = flv->audio_bit_rate; @@ -996,12 +998,12 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt) if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD) goto skip; } else if (type == FLV_TAG_TYPE_META) { - stream_type=FLV_STREAM_TYPE_DATA; + stream_type=FLV_STREAM_TYPE_SUBTITLE; if (size > 13 + 1 + 4) { // Header-type metadata stuff int type; meta_pos = avio_tell(s->pb); type = flv_read_metabody(s, next); - if (type == 0 && dts == 0 || type < 0 || type == TYPE_UNKNOWN) { + if (type == 0 && dts == 0 || type < 0) { if (type < 0 && flv->validate_count && flv->validate_index[0].pos > next && flv->validate_index[0].pos - 4 < next @@ -1015,6 +1017,8 @@ 
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt) return flv_data_packet(s, pkt, dts, next); } else if (type == TYPE_ONCAPTION) { return flv_data_packet(s, pkt, dts, next); + } else if (type == TYPE_UNKNOWN) { + stream_type = FLV_STREAM_TYPE_DATA; } avio_seek(s->pb, meta_pos, SEEK_SET); } @@ -1051,13 +1055,16 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt) if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && (s->video_codec_id || flv_same_video_codec(st->codecpar, flags))) break; - } else if (stream_type == FLV_STREAM_TYPE_DATA) { + } else if (stream_type == FLV_STREAM_TYPE_SUBTITLE) { if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) break; + } else if (stream_type == FLV_STREAM_TYPE_DATA) { + if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA) + break; } } if (i == s->nb_streams) { - static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE}; + static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE, AVMEDIA_TYPE_DATA}; st = create_stream(s, stream_types[stream_type]); if (!st) return AVERROR(ENOMEM); @@ -1151,8 +1158,10 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt) if (ret < 0) return ret; size -= ret; - } else if (stream_type == FLV_STREAM_TYPE_DATA) { + } else if (stream_type == FLV_STREAM_TYPE_SUBTITLE) { st->codecpar->codec_id = AV_CODEC_ID_TEXT; + } else if (stream_type == FLV_STREAM_TYPE_DATA) { + st->codecpar->codec_id = AV_CODEC_ID_NONE; // Opaque AMF data } if (st->codecpar->codec_id == AV_CODEC_ID_AAC || @@ -1253,6 +1262,7 @@ static int flv_read_packet(AVFormatContext *s, AVPacket *pkt) if ( stream_type == FLV_STREAM_TYPE_AUDIO || ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY) || + stream_type == FLV_STREAM_TYPE_SUBTITLE || stream_type == FLV_STREAM_TYPE_DATA) pkt->flags |= AV_PKT_FLAG_KEY; diff --git a/libavformat/ftp.c b/libavformat/ftp.c index 676f1c6b4c46b..e0720674808dd 100644 --- 
a/libavformat/ftp.c +++ b/libavformat/ftp.c @@ -513,7 +513,7 @@ static int ftp_features(FTPContext *s) static const char *feat_command = "FEAT\r\n"; static const char *enable_utf8_command = "OPTS UTF8 ON\r\n"; static const int feat_codes[] = {211, 0}; - static const int opts_codes[] = {200, 451, 0}; + static const int opts_codes[] = {200, 202, 451, 0}; av_freep(&s->features); if (ftp_send_command(s, feat_command, feat_codes, &s->features) != 211) { @@ -521,7 +521,8 @@ static int ftp_features(FTPContext *s) } if (ftp_has_feature(s, "UTF8")) { - if (ftp_send_command(s, enable_utf8_command, opts_codes, NULL) == 200) + int ret = ftp_send_command(s, enable_utf8_command, opts_codes, NULL); + if (ret == 200 || ret == 202) s->utf8 = 1; } @@ -781,13 +782,13 @@ static int ftp_read(URLContext *h, unsigned char *buf, int size) if (s->state == DISCONNECTED) { /* optimization */ if (s->position >= s->filesize) - return 0; + return AVERROR_EOF; if ((err = ftp_connect_data_connection(h)) < 0) return err; } if (s->state == READY) { if (s->position >= s->filesize) - return 0; + return AVERROR_EOF; if ((err = ftp_retrieve(s)) < 0) return err; } diff --git a/libavformat/hlsenc.c b/libavformat/hlsenc.c index 28c2dd62fcae3..f8f060d065628 100644 --- a/libavformat/hlsenc.c +++ b/libavformat/hlsenc.c @@ -147,7 +147,6 @@ typedef struct VariantStream { char *fmp4_init_filename; char *base_output_dirname; - int fmp4_init_mode; AVStream **streams; char codec_attr[128]; @@ -733,7 +732,6 @@ static int hls_mux_init(AVFormatContext *s, VariantStream *vs) vs->packets_written = 1; vs->start_pos = 0; vs->new_start = 1; - vs->fmp4_init_mode = 0; if (hls->segment_type == SEGMENT_TYPE_FMP4) { if (hls->max_seg_size > 0) { @@ -743,7 +741,6 @@ static int hls_mux_init(AVFormatContext *s, VariantStream *vs) vs->packets_written = 0; vs->init_range_length = 0; - vs->fmp4_init_mode = !byterange_mode; set_http_options(s, &options, hls); if ((ret = avio_open_dyn_buf(&oc->pb)) < 0) return ret; @@ -2205,6 +2202,7 
@@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) avio_flush(oc->pb); range_length = avio_close_dyn_buf(oc->pb, &buffer); avio_write(vs->out, buffer, range_length); + av_free(buffer); vs->init_range_length = range_length; avio_open_dyn_buf(&oc->pb); vs->packets_written = 0; @@ -2233,10 +2231,6 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) } } - if (vs->fmp4_init_mode) { - vs->number--; - } - if (hls->segment_type == SEGMENT_TYPE_FMP4) { if (hls->flags & HLS_SINGLE_FILE) { ret = flush_dynbuf(vs, &range_length); @@ -2294,7 +2288,6 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) vs->start_pos += vs->size; } - vs->fmp4_init_mode = 0; if (hls->flags & HLS_SINGLE_FILE) { vs->number++; } else if (hls->max_seg_size > 0) { @@ -2355,6 +2348,25 @@ static int hls_write_trailer(struct AVFormatContext *s) return AVERROR(ENOMEM); } if ( hls->segment_type == SEGMENT_TYPE_FMP4) { + if (!vs->init_range_length) { + av_write_frame(vs->avf, NULL); /* Flush any buffered data */ + avio_flush(oc->pb); + + uint8_t *buffer = NULL; + int range_length = avio_close_dyn_buf(oc->pb, &buffer); + avio_write(vs->out, buffer, range_length); + av_free(buffer); + vs->init_range_length = range_length; + avio_open_dyn_buf(&oc->pb); + vs->packets_written = 0; + vs->start_pos = range_length; + int byterange_mode = (hls->flags & HLS_SINGLE_FILE) || (hls->max_seg_size > 0); + if (!byterange_mode) { + ff_format_io_close(s, &vs->out); + hlsenc_io_close(s, &vs->out, vs->base_output_dirname); + } + } + int range_length = 0; if (!(hls->flags & HLS_SINGLE_FILE)) { ret = hlsenc_io_open(s, &vs->out, vs->avf->url, NULL); @@ -2368,6 +2380,7 @@ static int hls_write_trailer(struct AVFormatContext *s) if (ret < 0) { goto failed; } + vs->size = range_length; ff_format_io_close(s, &vs->out); } @@ -2376,8 +2389,6 @@ static int hls_write_trailer(struct AVFormatContext *s) if (oc->pb) { if (hls->segment_type != SEGMENT_TYPE_FMP4) { vs->size = avio_tell(vs->avf->pb) - 
vs->start_pos; - } else { - vs->size = avio_tell(vs->avf->pb); } if (hls->segment_type != SEGMENT_TYPE_FMP4) ff_format_io_close(s, &oc->pb); diff --git a/libavformat/ivfenc.c b/libavformat/ivfenc.c index 66441a2a43304..adf72117e9343 100644 --- a/libavformat/ivfenc.c +++ b/libavformat/ivfenc.c @@ -97,6 +97,8 @@ static int ivf_check_bitstream(struct AVFormatContext *s, const AVPacket *pkt) if (st->codecpar->codec_id == AV_CODEC_ID_VP9) ret = ff_stream_add_bitstream_filter(st, "vp9_superframe", NULL); + else if (st->codecpar->codec_id == AV_CODEC_ID_AV1) + ret = ff_stream_add_bitstream_filter(st, "av1_metadata", "td=insert"); return ret; } diff --git a/libavformat/libsrt.c b/libavformat/libsrt.c index fbfd6ace83845..fe3b312151563 100644 --- a/libavformat/libsrt.c +++ b/libavformat/libsrt.c @@ -76,6 +76,14 @@ typedef struct SRTContext { int64_t rcvlatency; int64_t peerlatency; enum SRTMode mode; + int sndbuf; + int rcvbuf; + int lossmaxttl; + int minversion; + char *streamid; + char *smoother; + int messageapi; + SRT_TRANSTYPE transtype; } SRTContext; #define D AV_OPT_FLAG_DECODING_PARAM @@ -110,6 +118,16 @@ static const AVOption libsrt_options[] = { { "caller", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = SRT_MODE_CALLER }, INT_MIN, INT_MAX, .flags = D|E, "mode" }, { "listener", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = SRT_MODE_LISTENER }, INT_MIN, INT_MAX, .flags = D|E, "mode" }, { "rendezvous", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = SRT_MODE_RENDEZVOUS }, INT_MIN, INT_MAX, .flags = D|E, "mode" }, + { "sndbuf", "Send buffer size (in bytes)", OFFSET(sndbuf), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, .flags = D|E }, + { "rcvbuf", "Receive buffer size (in bytes)", OFFSET(rcvbuf), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, .flags = D|E }, + { "lossmaxttl", "Maximum possible packet reorder tolerance", OFFSET(lossmaxttl), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, .flags = D|E }, + { "minversion", "The minimum SRT version that is required from the peer", OFFSET(minversion), 
AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, .flags = D|E }, + { "streamid", "A string of up to 512 characters that an Initiator can pass to a Responder", OFFSET(streamid), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = D|E }, + { "smoother", "The type of Smoother used for the transmission for that socket", OFFSET(smoother), AV_OPT_TYPE_STRING, { .str = NULL }, .flags = D|E }, + { "messageapi", "Enable message API", OFFSET(messageapi), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, .flags = D|E }, + { "transtype", "The transmission type for the socket", OFFSET(transtype), AV_OPT_TYPE_INT, { .i64 = SRTT_INVALID }, SRTT_LIVE, SRTT_INVALID, .flags = D|E, "transtype" }, + { "live", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = SRTT_LIVE }, INT_MIN, INT_MAX, .flags = D|E, "transtype" }, + { "file", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = SRTT_FILE }, INT_MIN, INT_MAX, .flags = D|E, "transtype" }, { NULL } }; @@ -297,6 +315,7 @@ static int libsrt_set_options_pre(URLContext *h, int fd) int connect_timeout = s->connect_timeout; if ((s->mode == SRT_MODE_RENDEZVOUS && libsrt_setsockopt(h, fd, SRTO_RENDEZVOUS, "SRTO_RENDEZVOUS", &yes, sizeof(yes)) < 0) || + (s->transtype != SRTT_INVALID && libsrt_setsockopt(h, fd, SRTO_TRANSTYPE, "SRTO_TRANSTYPE", &s->transtype, sizeof(s->transtype)) < 0) || (s->maxbw >= 0 && libsrt_setsockopt(h, fd, SRTO_MAXBW, "SRTO_MAXBW", &s->maxbw, sizeof(s->maxbw)) < 0) || (s->pbkeylen >= 0 && libsrt_setsockopt(h, fd, SRTO_PBKEYLEN, "SRTO_PBKEYLEN", &s->pbkeylen, sizeof(s->pbkeylen)) < 0) || (s->passphrase && libsrt_setsockopt(h, fd, SRTO_PASSPHRASE, "SRTO_PASSPHRASE", s->passphrase, strlen(s->passphrase)) < 0) || @@ -310,6 +329,13 @@ static int libsrt_set_options_pre(URLContext *h, int fd) (s->tlpktdrop >= 0 && libsrt_setsockopt(h, fd, SRTO_TLPKTDROP, "SRTO_TLPKDROP", &s->tlpktdrop, sizeof(s->tlpktdrop)) < 0) || (s->nakreport >= 0 && libsrt_setsockopt(h, fd, SRTO_NAKREPORT, "SRTO_NAKREPORT", &s->nakreport, sizeof(s->nakreport)) < 0) || (connect_timeout >= 0 && 
libsrt_setsockopt(h, fd, SRTO_CONNTIMEO, "SRTO_CONNTIMEO", &connect_timeout, sizeof(connect_timeout)) <0 ) || + (s->sndbuf >= 0 && libsrt_setsockopt(h, fd, SRTO_SNDBUF, "SRTO_SNDBUF", &s->sndbuf, sizeof(s->sndbuf)) < 0) || + (s->rcvbuf >= 0 && libsrt_setsockopt(h, fd, SRTO_RCVBUF, "SRTO_RCVBUF", &s->rcvbuf, sizeof(s->rcvbuf)) < 0) || + (s->lossmaxttl >= 0 && libsrt_setsockopt(h, fd, SRTO_LOSSMAXTTL, "SRTO_LOSSMAXTTL", &s->lossmaxttl, sizeof(s->lossmaxttl)) < 0) || + (s->minversion >= 0 && libsrt_setsockopt(h, fd, SRTO_MINVERSION, "SRTO_MINVERSION", &s->minversion, sizeof(s->minversion)) < 0) || + (s->streamid && libsrt_setsockopt(h, fd, SRTO_STREAMID, "SRTO_STREAMID", s->streamid, strlen(s->streamid)) < 0) || + (s->smoother && libsrt_setsockopt(h, fd, SRTO_SMOOTHER, "SRTO_SMOOTHER", s->smoother, strlen(s->smoother)) < 0) || + (s->messageapi >= 0 && libsrt_setsockopt(h, fd, SRTO_MESSAGEAPI, "SRTO_MESSAGEAPI", &s->messageapi, sizeof(s->messageapi)) < 0) || (s->payload_size >= 0 && libsrt_setsockopt(h, fd, SRTO_PAYLOADSIZE, "SRTO_PAYLOADSIZE", &s->payload_size, sizeof(s->payload_size)) < 0)) { return AVERROR(EIO); } @@ -522,6 +548,38 @@ static int libsrt_open(URLContext *h, const char *uri, int flags) return AVERROR(EIO); } } + if (av_find_info_tag(buf, sizeof(buf), "sndbuf", p)) { + s->sndbuf = strtol(buf, NULL, 10); + } + if (av_find_info_tag(buf, sizeof(buf), "rcvbuf", p)) { + s->rcvbuf = strtol(buf, NULL, 10); + } + if (av_find_info_tag(buf, sizeof(buf), "lossmaxttl", p)) { + s->lossmaxttl = strtol(buf, NULL, 10); + } + if (av_find_info_tag(buf, sizeof(buf), "minversion", p)) { + s->minversion = strtol(buf, NULL, 0); + } + if (av_find_info_tag(buf, sizeof(buf), "streamid", p)) { + av_freep(&s->streamid); + s->streamid = av_strdup(buf); + } + if (av_find_info_tag(buf, sizeof(buf), "smoother", p)) { + av_freep(&s->smoother); + s->smoother = av_strdup(buf); + } + if (av_find_info_tag(buf, sizeof(buf), "messageapi", p)) { + s->messageapi = strtol(buf, NULL, 10); + } + 
if (av_find_info_tag(buf, sizeof(buf), "transtype", p)) { + if (!strcmp(buf, "live")) { + s->transtype = SRTT_LIVE; + } else if (!strcmp(buf, "file")) { + s->transtype = SRTT_FILE; + } else { + return AVERROR(EINVAL); + } + } } return libsrt_setup(h, uri, flags); } diff --git a/libavformat/mxfenc.c b/libavformat/mxfenc.c index a2f68dd4ed542..3549b4137d796 100644 --- a/libavformat/mxfenc.c +++ b/libavformat/mxfenc.c @@ -142,23 +142,10 @@ enum ULIndex { INDEX_DV100_1080_50, INDEX_DV100_720_60, INDEX_DV100_720_50, - INDEX_DNXHD_1080p_10bit_HIGH, - INDEX_DNXHD_1080p_8bit_MEDIUM, - INDEX_DNXHD_1080p_8bit_HIGH, - INDEX_DNXHD_1080i_10bit_HIGH, - INDEX_DNXHD_1080i_8bit_MEDIUM, - INDEX_DNXHD_1080i_8bit_HIGH, - INDEX_DNXHD_720p_10bit, - INDEX_DNXHD_720p_8bit_HIGH, - INDEX_DNXHD_720p_8bit_MEDIUM, - INDEX_DNXHD_720p_8bit_LOW, - INDEX_DNXHR_LB, - INDEX_DNXHR_SQ, - INDEX_DNXHR_HQ, - INDEX_DNXHR_HQX, - INDEX_DNXHR_444, + INDEX_DNXHD, INDEX_JPEG2000, INDEX_H264, + INDEX_S436M, }; static const struct { @@ -169,7 +156,7 @@ static const struct { { AV_CODEC_ID_PCM_S24LE, INDEX_AES3 }, { AV_CODEC_ID_PCM_S16LE, INDEX_AES3 }, { AV_CODEC_ID_DVVIDEO, INDEX_DV }, - { AV_CODEC_ID_DNXHD, INDEX_DNXHD_1080p_10bit_HIGH }, + { AV_CODEC_ID_DNXHD, INDEX_DNXHD }, { AV_CODEC_ID_JPEG2000, INDEX_JPEG2000 }, { AV_CODEC_ID_H264, INDEX_H264 }, { AV_CODEC_ID_NONE } @@ -307,81 +294,11 @@ static const MXFContainerEssenceEntry mxf_essence_container_uls[] = { { 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x18,0x01,0x01,0x00 }, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x04,0x01,0x02,0x02,0x02,0x02,0x08,0x00 }, mxf_write_cdci_desc }, - // DNxHD 1080p 10bit high + // DNxHD { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, { 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x01,0x00,0x00 }, mxf_write_cdci_desc }, - // DNxHD 1080p 8bit medium - { { 
0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x03,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHD 1080p 8bit high - { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x04,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHD 1080i 10bit high - { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x07,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHD 1080i 8bit medium - { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x08,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHD 1080i 8bit high - { { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x01,0x0D,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x01,0x02,0x01,0x01,0x0D,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x09,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHD 720p 10bit - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x10,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHD 720p 8bit high - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 
0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x11,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHD 720p 8bit medium - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x12,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHD 720p 8bit low - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0A,0x04,0x01,0x02,0x02,0x71,0x13,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHR LB - CID 1274 - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0D,0x04,0x01,0x02,0x02,0x71,0x28,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHR SQ - CID 1273 - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0D,0x04,0x01,0x02,0x02,0x71,0x27,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHR HQ - CID 1272 - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0D,0x04,0x01,0x02,0x02,0x71,0x26,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHR HQX - CID 1271 - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 
0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0D,0x04,0x01,0x02,0x02,0x71,0x25,0x00,0x00 }, - mxf_write_cdci_desc }, - // DNxHR 444 - CID 1270 - { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x01,0x0d,0x01,0x03,0x01,0x02,0x11,0x01,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x05,0x00 }, - { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x0D,0x04,0x01,0x02,0x02,0x71,0x24,0x00,0x00 }, - mxf_write_cdci_desc }, // JPEG2000 { { 0x06,0x0e,0x2b,0x34,0x04,0x01,0x01,0x07,0x0d,0x01,0x03,0x01,0x02,0x0c,0x01,0x00 }, { 0x06,0x0e,0x2b,0x34,0x01,0x02,0x01,0x01,0x0d,0x01,0x03,0x01,0x15,0x01,0x08,0x00 }, @@ -1363,7 +1280,7 @@ static int64_t mxf_write_cdci_common(AVFormatContext *s, AVStream *st, const UID default: f1 = 0; f2 = 0; break; } - if (!sc->interlaced) { + if (!sc->interlaced && f2) { f2 = 0; f1 *= 2; } @@ -2028,90 +1945,81 @@ static int mxf_write_partition(AVFormatContext *s, int bodysid, return 0; } -static int mxf_parse_dnxhd_frame(AVFormatContext *s, AVStream *st, -AVPacket *pkt) +static const struct { + int cid; + UID codec_ul; +} mxf_dnxhd_codec_uls[] = { + { 1235, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x01,0x00,0x00 } }, // 1080p 10bit HIGH + { 1237, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x03,0x00,0x00 } }, // 1080p 8bit MED + { 1238, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x04,0x00,0x00 } }, // 1080p 8bit HIGH + { 1241, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x07,0x00,0x00 } }, // 1080i 10bit HIGH + { 1242, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x08,0x00,0x00 } }, // 1080i 8bit MED + { 1243, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x09,0x00,0x00 } }, // 1080i 8bit HIGH + { 1244, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x0a,0x00,0x00 } }, // 1080i 8bit TR + { 1250, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x10,0x00,0x00 } }, // 720p 
10bit + { 1251, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x11,0x00,0x00 } }, // 720p 8bit HIGH + { 1252, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x12,0x00,0x00 } }, // 720p 8bit MED + { 1253, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x13,0x00,0x00 } }, // 720p 8bit LOW + { 1256, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x16,0x00,0x00 } }, // 1080p 10bit 444 + { 1258, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x18,0x00,0x00 } }, // 720p 8bit TR + { 1259, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x19,0x00,0x00 } }, // 1080p 8bit TR + { 1260, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x1a,0x00,0x00 } }, // 1080i 8bit TR MBAFF + { 1270, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x24,0x00,0x00 } }, // DNXHR 444 + { 1271, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x25,0x00,0x00 } }, // DNXHR HQX + { 1272, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x26,0x00,0x00 } }, // DNXHR HQ + { 1273, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x27,0x00,0x00 } }, // DNXHR SQ + { 1274, { 0x06,0x0E,0x2B,0x34,0x04,0x01,0x01,0x0a,0x04,0x01,0x02,0x02,0x71,0x28,0x00,0x00 } }, // DNXHR LB +}; + +static int mxf_parse_dnxhd_frame(AVFormatContext *s, AVStream *st, AVPacket *pkt) { MXFContext *mxf = s->priv_data; MXFStreamContext *sc = st->priv_data; - int cid; - uint8_t* header_cid; - int frame_size = 0; + int i, cid, frame_size = 0; if (mxf->header_written) return 1; if (pkt->size < 43) - return -1; + return 0; - header_cid = pkt->data + 0x28; - cid = header_cid[0] << 24 | header_cid[1] << 16 | header_cid[2] << 8 | header_cid[3]; + sc->codec_ul = NULL; + cid = AV_RB32(pkt->data + 0x28); + for (i = 0; i < FF_ARRAY_ELEMS(mxf_dnxhd_codec_uls); i++) { + if (cid == mxf_dnxhd_codec_uls[i].cid) { + sc->codec_ul = 
&mxf_dnxhd_codec_uls[i].codec_ul; + break; + } + } + if (!sc->codec_ul) + return 0; + + sc->component_depth = 0; + switch (pkt->data[0x21] >> 5) { + case 1: sc->component_depth = 8; break; + case 2: sc->component_depth = 10; break; + case 3: sc->component_depth = 12; break; + } + if (!sc->component_depth) + return 0; if ((frame_size = avpriv_dnxhd_get_frame_size(cid)) == DNXHD_VARIABLE) { frame_size = avpriv_dnxhd_get_hr_frame_size(cid, st->codecpar->width, st->codecpar->height); } - if (frame_size < 0) - return -1; + return 0; + if ((sc->interlaced = avpriv_dnxhd_get_interlaced(cid)) < 0) - return AVERROR_INVALIDDATA; + return 0; - switch (cid) { - case 1235: - sc->index = INDEX_DNXHD_1080p_10bit_HIGH; - sc->component_depth = 10; - break; - case 1237: - sc->index = INDEX_DNXHD_1080p_8bit_MEDIUM; - break; - case 1238: - sc->index = INDEX_DNXHD_1080p_8bit_HIGH; - break; - case 1241: - sc->index = INDEX_DNXHD_1080i_10bit_HIGH; - sc->component_depth = 10; - break; - case 1242: - sc->index = INDEX_DNXHD_1080i_8bit_MEDIUM; - break; - case 1243: - sc->index = INDEX_DNXHD_1080i_8bit_HIGH; - break; - case 1250: - sc->index = INDEX_DNXHD_720p_10bit; - sc->component_depth = 10; - break; - case 1251: - sc->index = INDEX_DNXHD_720p_8bit_HIGH; - break; - case 1252: - sc->index = INDEX_DNXHD_720p_8bit_MEDIUM; - break; - case 1253: - sc->index = INDEX_DNXHD_720p_8bit_LOW; - break; - case 1274: - sc->index = INDEX_DNXHR_LB; - break; - case 1273: - sc->index = INDEX_DNXHR_SQ; - break; - case 1272: - sc->index = INDEX_DNXHR_HQ; - break; - case 1271: - sc->index = INDEX_DNXHR_HQX; - sc->component_depth = st->codecpar->bits_per_raw_sample; - break; - case 1270: - sc->index = INDEX_DNXHR_444; - sc->component_depth = st->codecpar->bits_per_raw_sample; - break; - default: - return -1; + if (cid >= 1270) { // RI raster + av_reduce(&sc->aspect_ratio.num, &sc->aspect_ratio.den, + st->codecpar->width, st->codecpar->height, + INT_MAX); + } else { + sc->aspect_ratio = (AVRational){ 16, 9 }; } 
- sc->codec_ul = &mxf_essence_container_uls[sc->index].codec_ul; - sc->aspect_ratio = (AVRational){ 16, 9 }; sc->frame_size = pkt->size; return 1; @@ -2575,7 +2483,7 @@ static int mxf_write_header(AVFormatContext *s) } else if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA) { AVDictionaryEntry *e = av_dict_get(st->metadata, "data_type", NULL, 0); if (e && !strcmp(e->value, "vbi_vanc_smpte_436M")) { - sc->index = 38; + sc->index = INDEX_S436M; } else { av_log(s, AV_LOG_ERROR, "track %d: unsupported data type\n", i); return -1; diff --git a/libavformat/pcmdec.c b/libavformat/pcmdec.c index d0ceea6fa9f16..bd2a0384f8aa0 100644 --- a/libavformat/pcmdec.c +++ b/libavformat/pcmdec.c @@ -177,6 +177,9 @@ PCMDEF(alaw, "PCM A-law", PCMDEF(mulaw, "PCM mu-law", "ul", AV_CODEC_ID_PCM_MULAW) +PCMDEF(vidc, "PCM Archimedes VIDC", + NULL, AV_CODEC_ID_PCM_VIDC) + static const AVOption sln_options[] = { { "sample_rate", "", offsetof(PCMAudioDemuxerContext, sample_rate), AV_OPT_TYPE_INT, {.i64 = 8000}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM }, { "channels", "", offsetof(PCMAudioDemuxerContext, channels), AV_OPT_TYPE_INT, {.i64 = 1}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM }, diff --git a/libavformat/pcmenc.c b/libavformat/pcmenc.c index 3e4f30805722b..1760b3bef7ae0 100644 --- a/libavformat/pcmenc.c +++ b/libavformat/pcmenc.c @@ -92,3 +92,6 @@ PCMDEF(alaw, "PCM A-law", PCMDEF(mulaw, "PCM mu-law", "ul", AV_CODEC_ID_PCM_MULAW) + +PCMDEF(vidc, "PCM Archimedes VIDC", + NULL, AV_CODEC_ID_PCM_VIDC) diff --git a/libavformat/rpl.c b/libavformat/rpl.c index d373600478600..6b45b35c30872 100644 --- a/libavformat/rpl.c +++ b/libavformat/rpl.c @@ -119,6 +119,8 @@ static int rpl_read_header(AVFormatContext *s) AVStream *vst = NULL, *ast = NULL; int total_audio_size; int error = 0; + const char *endptr; + char audio_type[RPL_LINE_LENGTH]; uint32_t i; @@ -188,7 +190,9 @@ static int rpl_read_header(AVFormatContext *s) ast->codecpar->codec_tag = audio_format; ast->codecpar->sample_rate = 
read_line_and_int(pb, &error); // audio bitrate ast->codecpar->channels = read_line_and_int(pb, &error); // number of audio channels - ast->codecpar->bits_per_coded_sample = read_line_and_int(pb, &error); // audio bits per sample + error |= read_line(pb, line, sizeof(line)); + ast->codecpar->bits_per_coded_sample = read_int(line, &endptr, &error); // audio bits per sample + strcpy(audio_type, endptr); // At least one sample uses 0 for ADPCM, which is really 4 bits // per sample. if (ast->codecpar->bits_per_coded_sample == 0) @@ -205,6 +209,17 @@ static int rpl_read_header(AVFormatContext *s) // 16-bit audio is always signed ast->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE; break; + } else if (ast->codecpar->bits_per_coded_sample == 8) { + if(strstr(audio_type, "unsigned") != NULL) { + ast->codecpar->codec_id = AV_CODEC_ID_PCM_U8; + break; + } else if(strstr(audio_type, "linear") != NULL) { + ast->codecpar->codec_id = AV_CODEC_ID_PCM_S8; + break; + } else { + ast->codecpar->codec_id = AV_CODEC_ID_PCM_VIDC; + break; + } } // There are some other formats listed as legal per the spec; // samples needed. 
diff --git a/libavformat/vc1test.c b/libavformat/vc1test.c index a801f4bd22e41..d44570988b554 100644 --- a/libavformat/vc1test.c +++ b/libavformat/vc1test.c @@ -34,9 +34,14 @@ static int vc1t_probe(AVProbeData *p) { + uint32_t size; + if (p->buf_size < 24) return 0; - if (p->buf[3] != 0xC5 || AV_RL32(&p->buf[4]) != 4 || AV_RL32(&p->buf[20]) != 0xC) + + size = AV_RL32(&p->buf[4]); + if (p->buf[3] != 0xC5 || size < 4 || size > p->buf_size - 20 || + AV_RL32(&p->buf[size+16]) != 0xC) return 0; return AVPROBE_SCORE_EXTENSION; @@ -48,9 +53,10 @@ static int vc1t_read_header(AVFormatContext *s) AVStream *st; int frames; uint32_t fps; + uint32_t size; frames = avio_rl24(pb); - if(avio_r8(pb) != 0xC5 || avio_rl32(pb) != 4) + if (avio_r8(pb) != 0xC5 || ((size = avio_rl32(pb)) < 4)) return AVERROR_INVALIDDATA; /* init video codec */ @@ -63,6 +69,8 @@ static int vc1t_read_header(AVFormatContext *s) if (ff_get_extradata(s, st->codecpar, pb, VC1_EXTRADATA_SIZE) < 0) return AVERROR(ENOMEM); + + avio_skip(pb, size - 4); st->codecpar->height = avio_rl32(pb); st->codecpar->width = avio_rl32(pb); if(avio_rl32(pb) != 0xC) @@ -114,5 +122,6 @@ AVInputFormat ff_vc1t_demuxer = { .read_probe = vc1t_probe, .read_header = vc1t_read_header, .read_packet = vc1t_read_packet, + .extensions = "rcv", .flags = AVFMT_GENERIC_INDEX, }; diff --git a/libavformat/version.h b/libavformat/version.h index e2d0cfd4143ae..843f922f9c38a 100644 --- a/libavformat/version.h +++ b/libavformat/version.h @@ -32,8 +32,8 @@ // Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium) // Also please add any ticket numbers that you believe might be affected here #define LIBAVFORMAT_VERSION_MAJOR 58 -#define LIBAVFORMAT_VERSION_MINOR 19 -#define LIBAVFORMAT_VERSION_MICRO 101 +#define LIBAVFORMAT_VERSION_MINOR 22 +#define LIBAVFORMAT_VERSION_MICRO 100 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ LIBAVFORMAT_VERSION_MINOR, \ diff --git a/libavutil/frame.c 
b/libavutil/frame.c index 4460325a9bba8..9b3fb13e68b80 100644 --- a/libavutil/frame.c +++ b/libavutil/frame.c @@ -243,11 +243,13 @@ static int get_video_buffer(AVFrame *frame, int align) return ret; frame->buf[0] = av_buffer_alloc(ret + 4*plane_padding); - if (!frame->buf[0]) + if (!frame->buf[0]) { + ret = AVERROR(ENOMEM); goto fail; + } - if (av_image_fill_pointers(frame->data, frame->format, padded_height, - frame->buf[0]->data, frame->linesize) < 0) + if ((ret = av_image_fill_pointers(frame->data, frame->format, padded_height, + frame->buf[0]->data, frame->linesize)) < 0) goto fail; for (i = 1; i < 4; i++) { @@ -260,7 +262,7 @@ static int get_video_buffer(AVFrame *frame, int align) return 0; fail: av_frame_unref(frame); - return AVERROR(ENOMEM); + return ret; } static int get_audio_buffer(AVFrame *frame, int align) @@ -831,6 +833,7 @@ const char *av_frame_side_data_name(enum AVFrameSideDataType type) case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata"; case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata"; case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode"; + case AV_FRAME_DATA_S12M_TIMECODE: return "SMPTE 12-1 timecode"; case AV_FRAME_DATA_SPHERICAL: return "Spherical Mapping"; case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile"; #if FF_API_FRAME_QP diff --git a/libavutil/frame.h b/libavutil/frame.h index 9d57d6ce66ff9..66f27f44bd373 100644 --- a/libavutil/frame.h +++ b/libavutil/frame.h @@ -158,6 +158,14 @@ enum AVFrameSideDataType { */ AV_FRAME_DATA_QP_TABLE_DATA, #endif + + /** + * Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t + * where the first uint32_t describes how many (1-3) of the other timecodes are used. + * The timecode format is described in the av_timecode_get_smpte_from_framenum() + * function in libavutil/timecode.c. 
+ */ + AV_FRAME_DATA_S12M_TIMECODE, }; enum AVActiveFormatDescription { @@ -381,7 +389,6 @@ typedef struct AVFrame { * that time, * the decoder reorders values as needed and sets AVFrame.reordered_opaque * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque - * @deprecated in favor of pkt_pts */ int64_t reordered_opaque; diff --git a/libavutil/integer.c b/libavutil/integer.c index 890e314dced6d..78e252fbde24f 100644 --- a/libavutil/integer.c +++ b/libavutil/integer.c @@ -74,7 +74,7 @@ AVInteger av_mul_i(AVInteger a, AVInteger b){ if(a.v[i]) for(j=i; j>16) + out.v[j] + a.v[i]*b.v[j-i]; + carry= (carry>>16) + out.v[j] + a.v[i]*(unsigned)b.v[j-i]; out.v[j]= carry; } } diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c index 970a83214c506..1c36577289537 100644 --- a/libavutil/pixdesc.c +++ b/libavutil/pixdesc.c @@ -31,19 +31,22 @@ #include "intreadwrite.h" #include "version.h" -void av_read_image_line(uint16_t *dst, +void av_read_image_line2(void *dst, const uint8_t *data[4], const int linesize[4], const AVPixFmtDescriptor *desc, int x, int y, int c, int w, - int read_pal_component) + int read_pal_component, + int dst_element_size) { AVComponentDescriptor comp = desc->comp[c]; int plane = comp.plane; int depth = comp.depth; - int mask = (1 << depth) - 1; + unsigned mask = (1ULL << depth) - 1; int shift = comp.shift; int step = comp.step; int flags = desc->flags; + uint16_t *dst16 = dst; + uint32_t *dst32 = dst; if (flags & AV_PIX_FMT_FLAG_BITSTREAM) { int skip = x * step + comp.offset; @@ -57,38 +60,56 @@ void av_read_image_line(uint16_t *dst, shift -= step; p -= shift >> 3; shift &= 7; - *dst++ = val; + if (dst_element_size == 4) *dst32++ = val; + else *dst16++ = val; } } else { const uint8_t *p = data[plane] + y * linesize[plane] + x * step + comp.offset; int is_8bit = shift + depth <= 8; + int is_16bit= shift + depth <=16; if (is_8bit) p += !!(flags & AV_PIX_FMT_FLAG_BE); while (w--) { - int val = is_8bit ? 
*p : - flags & AV_PIX_FMT_FLAG_BE ? AV_RB16(p) : AV_RL16(p); + unsigned val; + if (is_8bit) val = *p; + else if(is_16bit) val = flags & AV_PIX_FMT_FLAG_BE ? AV_RB16(p) : AV_RL16(p); + else val = flags & AV_PIX_FMT_FLAG_BE ? AV_RB32(p) : AV_RL32(p); val = (val >> shift) & mask; if (read_pal_component) val = data[1][4 * val + c]; p += step; - *dst++ = val; + if (dst_element_size == 4) *dst32++ = val; + else *dst16++ = val; } } } -void av_write_image_line(const uint16_t *src, +void av_read_image_line(uint16_t *dst, + const uint8_t *data[4], const int linesize[4], + const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, + int read_pal_component) +{ + av_read_image_line2(dst, data, linesize, desc,x, y, c, w, + read_pal_component, + 2); +} + +void av_write_image_line2(const void *src, uint8_t *data[4], const int linesize[4], const AVPixFmtDescriptor *desc, - int x, int y, int c, int w) + int x, int y, int c, int w, int src_element_size) { AVComponentDescriptor comp = desc->comp[c]; int plane = comp.plane; int depth = comp.depth; int step = comp.step; int flags = desc->flags; + const uint32_t *src32 = src; + const uint16_t *src16 = src; if (flags & AV_PIX_FMT_FLAG_BITSTREAM) { int skip = x * step + comp.offset; @@ -96,7 +117,7 @@ void av_write_image_line(const uint16_t *src, int shift = 8 - depth - (skip & 7); while (w--) { - *p |= *src++ << shift; + *p |= (src_element_size == 4 ? *src32++ : *src16++) << shift; shift -= step; p -= shift >> 3; shift &= 7; @@ -109,17 +130,28 @@ void av_write_image_line(const uint16_t *src, if (shift + depth <= 8) { p += !!(flags & AV_PIX_FMT_FLAG_BE); while (w--) { - *p |= (*src++ << shift); + *p |= ((src_element_size == 4 ? *src32++ : *src16++) << shift); p += step; } } else { while (w--) { - if (flags & AV_PIX_FMT_FLAG_BE) { - uint16_t val = AV_RB16(p) | (*src++ << shift); - AV_WB16(p, val); + unsigned s = (src_element_size == 4 ? 
*src32++ : *src16++); + if (shift + depth <= 16) { + if (flags & AV_PIX_FMT_FLAG_BE) { + uint16_t val = AV_RB16(p) | (s << shift); + AV_WB16(p, val); + } else { + uint16_t val = AV_RL16(p) | (s << shift); + AV_WL16(p, val); + } } else { - uint16_t val = AV_RL16(p) | (*src++ << shift); - AV_WL16(p, val); + if (flags & AV_PIX_FMT_FLAG_BE) { + uint32_t val = AV_RB32(p) | (s << shift); + AV_WB32(p, val); + } else { + uint32_t val = AV_RL32(p) | (s << shift); + AV_WL32(p, val); + } } p += step; } @@ -127,6 +159,14 @@ void av_write_image_line(const uint16_t *src, } } +void av_write_image_line(const uint16_t *src, + uint8_t *data[4], const int linesize[4], + const AVPixFmtDescriptor *desc, + int x, int y, int c, int w) +{ + av_write_image_line2(src, data, linesize, desc, x, y, c, w, 2); +} + #if FF_API_PLUS1_MINUS1 FF_DISABLE_DEPRECATION_WARNINGS #endif diff --git a/libavutil/pixdesc.h b/libavutil/pixdesc.h index 4f9c5a271fdea..c055810ae8772 100644 --- a/libavutil/pixdesc.h +++ b/libavutil/pixdesc.h @@ -343,7 +343,13 @@ char *av_get_pix_fmt_string(char *buf, int buf_size, * format writes the values corresponding to the palette * component c in data[1] to dst, rather than the palette indexes in * data[0]. The behavior is undefined if the format is not paletted. 
+ * @param dst_element_size size of elements in dst array (2 or 4 byte) */ +void av_read_image_line2(void *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component, + int dst_element_size); + void av_read_image_line(uint16_t *dst, const uint8_t *data[4], const int linesize[4], const AVPixFmtDescriptor *desc, int x, int y, int c, int w, int read_pal_component); @@ -361,7 +367,12 @@ void av_read_image_line(uint16_t *dst, const uint8_t *data[4], * @param y the vertical coordinate of the first pixel to write * @param w the width of the line to write, that is the number of * values to write to the image line + * @param src_element_size size of elements in src array (2 or 4 byte) */ +void av_write_image_line2(const void *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int src_element_size); + void av_write_image_line(const uint16_t *src, uint8_t *data[4], const int linesize[4], const AVPixFmtDescriptor *desc, int x, int y, int c, int w); diff --git a/libavutil/version.h b/libavutil/version.h index f84ec891544ce..adbf59639cf5e 100644 --- a/libavutil/version.h +++ b/libavutil/version.h @@ -79,7 +79,7 @@ */ #define LIBAVUTIL_VERSION_MAJOR 56 -#define LIBAVUTIL_VERSION_MINOR 19 +#define LIBAVUTIL_VERSION_MINOR 23 #define LIBAVUTIL_VERSION_MICRO 101 #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ diff --git a/libpostproc/version.h b/libpostproc/version.h index f3725483ce94e..373705679b504 100644 --- a/libpostproc/version.h +++ b/libpostproc/version.h @@ -29,7 +29,7 @@ #include "libavutil/avutil.h" #define LIBPOSTPROC_VERSION_MAJOR 55 -#define LIBPOSTPROC_VERSION_MINOR 2 +#define LIBPOSTPROC_VERSION_MINOR 4 #define LIBPOSTPROC_VERSION_MICRO 100 #define LIBPOSTPROC_VERSION_INT AV_VERSION_INT(LIBPOSTPROC_VERSION_MAJOR, \ diff --git a/libswresample/version.h b/libswresample/version.h index 
b8b5bee9b3c2b..c70cf812f80e3 100644 --- a/libswresample/version.h +++ b/libswresample/version.h @@ -29,7 +29,7 @@ #include "libavutil/avutil.h" #define LIBSWRESAMPLE_VERSION_MAJOR 3 -#define LIBSWRESAMPLE_VERSION_MINOR 2 +#define LIBSWRESAMPLE_VERSION_MINOR 4 #define LIBSWRESAMPLE_VERSION_MICRO 100 #define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c index 4b3cd71e900f1..058f2b94db1ec 100644 --- a/libswscale/swscale_unscaled.c +++ b/libswscale/swscale_unscaled.c @@ -423,7 +423,7 @@ static void gray8aToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, } } -static int packed_16bpc_bswap(SwsContext *c, const uint8_t *src[], +static int bswap_16bpc(SwsContext *c, const uint8_t *src[], int srcStride[], int srcSliceY, int srcSliceH, uint8_t *dst[], int dstStride[]) { @@ -1821,6 +1821,14 @@ static int planarCopyWrapper(SwsContext *c, const uint8_t *src[], srcPtr += srcStride[plane]; dstPtr += dstStride[plane]; } + } else if (isFloat(c->srcFormat) && isFloat(c->dstFormat) && + isBE(c->srcFormat) != isBE(c->dstFormat)) { /* swap float plane */ + for (i = 0; i < height; i++) { + for (j = 0; j < length; j++) + ((uint32_t *) dstPtr)[j] = av_bswap32(((const uint32_t *) srcPtr)[j]); + srcPtr += srcStride[plane]; + dstPtr += dstStride[plane]; + } } else if (dstStride[plane] == srcStride[plane] && srcStride[plane] > 0 && srcStride[plane] == length) { memcpy(dst[plane] + dstStride[plane] * y, src[plane], @@ -2015,7 +2023,7 @@ void ff_get_unscaled_swscale(SwsContext *c) IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV444P12) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV444P14) || IS_DIFFERENT_ENDIANESS(srcFormat, dstFormat, AV_PIX_FMT_YUV444P16)) - c->swscale = packed_16bpc_bswap; + c->swscale = bswap_16bpc; if (usePal(srcFormat) && isByteRGB(dstFormat)) c->swscale = palToRgbWrapper; diff --git a/libswscale/utils.c b/libswscale/utils.c index 
cb40164a95f35..d5913ed73386e 100644 --- a/libswscale/utils.c +++ b/libswscale/utils.c @@ -1810,8 +1810,7 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter, /* unscaled special cases */ if (unscaled && !usesHFilter && !usesVFilter && (c->srcRange == c->dstRange || isAnyRGB(dstFormat) || - srcFormat == AV_PIX_FMT_GRAYF32 && dstFormat == AV_PIX_FMT_GRAY8 || - srcFormat == AV_PIX_FMT_GRAY8 && dstFormat == AV_PIX_FMT_GRAYF32)) { + isFloat(srcFormat) || isFloat(dstFormat))){ ff_get_unscaled_swscale(c); if (c->swscale) { diff --git a/libswscale/version.h b/libswscale/version.h index a07bd717c8e60..0e28a76e64d27 100644 --- a/libswscale/version.h +++ b/libswscale/version.h @@ -27,7 +27,7 @@ #include "libavutil/version.h" #define LIBSWSCALE_VERSION_MAJOR 5 -#define LIBSWSCALE_VERSION_MINOR 2 +#define LIBSWSCALE_VERSION_MINOR 4 #define LIBSWSCALE_VERSION_MICRO 100 #define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \ diff --git a/tests/api/Makefile b/tests/api/Makefile index 759dd9d243a98..b5c4ccae23527 100644 --- a/tests/api/Makefile +++ b/tests/api/Makefile @@ -1,5 +1,6 @@ APITESTPROGS-$(call ENCDEC, FLAC, FLAC) += api-flac APITESTPROGS-$(call DEMDEC, H264, H264) += api-h264 +APITESTPROGS-$(call DEMDEC, H264, H264) += api-h264-slice APITESTPROGS-yes += api-seek APITESTPROGS-yes += api-codec-param APITESTPROGS-$(call DEMDEC, H263, H263) += api-band diff --git a/tests/api/api-h264-slice-test.c b/tests/api/api-h264-slice-test.c new file mode 100644 index 0000000000000..be03e80049a5b --- /dev/null +++ b/tests/api/api-h264-slice-test.c @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2001 Fabrice Bellard + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the 
Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#define MAX_SLICES 8 + +// ./fate 2 ./crew_cif out.y4m + +#include "config.h" + +#include +#include +#include +#include + +#if HAVE_UNISTD_H +#include +#endif +#if HAVE_IO_H +#include +#endif +#include +#include +#include + +#include "libavformat/network.h" +#include "libavcodec/avcodec.h" +#include "libavutil/pixdesc.h" +#include "libavutil/hash.h" + +static int header = 0; + +static int decode(AVCodecContext *dec_ctx, AVFrame *frame, + AVPacket *pkt) +{ + static uint64_t frame_cnt = 0; + int ret; + + ret = avcodec_send_packet(dec_ctx, pkt); + if (ret < 0) { + fprintf(stderr, "Error sending a packet for decoding: %s\n", av_err2str(ret)); + return ret; + } + + while (ret >= 0) { + const AVPixFmtDescriptor *desc; + char sum[AV_HASH_MAX_SIZE * 2 + 1]; + struct AVHashContext *hash; + + ret = avcodec_receive_frame(dec_ctx, frame); + if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) { + return 0; + } else if (ret < 0) { + fprintf(stderr, "Error during decoding: %s\n", av_err2str(ret)); + return ret; + } + + if (!header) { + printf( + "#format: frame checksums\n" + "#version: 2\n" + "#hash: MD5\n" + "#tb 0: 1/30\n" + "#media_type 0: video\n" + "#codec_id 0: rawvideo\n" + "#dimensions 0: 352x288\n" + "#sar 0: 128/117\n" + 
"#stream#, dts, pts, duration, size, hash\n"); + header = 1; + } + desc = av_pix_fmt_desc_get(dec_ctx->pix_fmt); + if ((ret = av_hash_alloc(&hash, "md5")) < 0) { + return ret; + } + av_hash_init(hash); + + for (int i = 0; i < frame->height; i++) + av_hash_update(hash, &frame->data[0][i * frame->linesize[0]], frame->width); + for (int i = 0; i < frame->height >> desc->log2_chroma_h; i++) + av_hash_update(hash, &frame->data[1][i * frame->linesize[1]], frame->width >> desc->log2_chroma_w); + for (int i = 0; i < frame->height >> desc->log2_chroma_h; i++) + av_hash_update(hash, &frame->data[2][i * frame->linesize[2]], frame->width >> desc->log2_chroma_w); + + av_hash_final_hex(hash, sum, av_hash_get_size(hash) * 2 + 1); + printf("0, %10"PRId64", %10"PRId64", 1, %8d, %s\n", + frame_cnt, frame_cnt, + (frame->width * frame->height + 2 * (frame->height >> desc->log2_chroma_h) * (frame->width >> desc->log2_chroma_w)), sum); + frame_cnt += 1; + av_hash_freep(&hash); + } + return 0; +} + +int main(int argc, char **argv) +{ + const AVCodec *codec = NULL; + AVCodecContext *c = NULL; + AVFrame *frame = NULL; + unsigned int threads; + AVPacket *pkt; + FILE *file = NULL; + char nal[MAX_SLICES * UINT16_MAX + AV_INPUT_BUFFER_PADDING_SIZE]; + int nals = 0, ret = 0; + char *p = nal; + + if (argc < 4) { + fprintf(stderr, "Usage: %s \n", argv[0]); + return -1; + } + + if (!(threads = strtoul(argv[1], NULL, 0))) + threads = 1; + else if (threads > MAX_SLICES) + threads = MAX_SLICES; + +#ifdef _WIN32 + setmode(fileno(stdout), O_BINARY); +#endif + + if (!(pkt = av_packet_alloc())) { + return -1; + } + + if (!(codec = avcodec_find_decoder(AV_CODEC_ID_H264))) { + fprintf(stderr, "Codec not found\n"); + ret = -1; + goto err; + } + + if (!(c = avcodec_alloc_context3(codec))) { + fprintf(stderr, "Could not allocate video codec context\n"); + ret = -1; + goto err; + } + + c->width = 352; + c->height = 288; + + c->flags2 |= AV_CODEC_FLAG2_CHUNKS; + c->thread_type = FF_THREAD_SLICE; + 
c->thread_count = threads; + + if ((ret = avcodec_open2(c, codec, NULL)) < 0) { + fprintf(stderr, "Could not open codec\n"); + goto err; + } + +#if HAVE_THREADS + if (c->active_thread_type != FF_THREAD_SLICE) { + fprintf(stderr, "Couldn't activate slice threading: %d\n", c->active_thread_type); + ret = -1; + goto err; + } +#else + fprintf(stderr, "WARN: not using threads, only checking decoding slice NALUs\n"); +#endif + + if (!(frame = av_frame_alloc())) { + fprintf(stderr, "Could not allocate video frame\n"); + ret = -1; + goto err; + } + + if (!(file = fopen(argv[2], "rb"))) { + fprintf(stderr, "Couldn't open NALU file: %s\n", argv[2]); + ret = -1; + goto err; + } + + while(1) { + uint16_t size = 0; + size_t ret = fread(&size, 1, sizeof(uint16_t), file); + if (ret != sizeof(uint16_t)) + break; + + size = ntohs(size); + ret = fread(p, 1, size, file); + if (ret != size) { + perror("Couldn't read data"); + goto err; + } + p += ret; + + if (++nals >= threads) { + int decret = 0; + pkt->data = nal; + pkt->size = p - nal; + if ((decret = decode(c, frame, pkt)) < 0) { + goto err; + } + memset(nal, 0, MAX_SLICES * UINT16_MAX + AV_INPUT_BUFFER_PADDING_SIZE); + nals = 0; + p = nal; + } + } + + if (nals) { + pkt->data = nal; + pkt->size = p - nal; + if ((ret = decode(c, frame, pkt)) < 0) { + goto err; + } + } + + ret = decode(c, frame, NULL); + +err: + if (file) + fclose(file); + av_frame_free(&frame); + avcodec_free_context(&c); + av_packet_free(&pkt); + + return ret; +} diff --git a/tests/fate/api.mak b/tests/fate/api.mak index eb656e68a3867..132c38f2c213e 100644 --- a/tests/fate/api.mak +++ b/tests/fate/api.mak @@ -12,6 +12,10 @@ FATE_API_SAMPLES_LIBAVFORMAT-$(call DEMDEC, H264, H264) += fate-api-h264 fate-api-h264: $(APITESTSDIR)/api-h264-test$(EXESUF) fate-api-h264: CMD = run $(APITESTSDIR)/api-h264-test $(TARGET_SAMPLES)/h264-conformance/SVA_NL2_E.264 +FATE_API_SAMPLES_LIBAVFORMAT-$(call DEMDEC, H264, H264) += fate-api-h264-slice +fate-api-h264-slice: 
$(APITESTSDIR)/api-h264-slice-test$(EXESUF) +fate-api-h264-slice: CMD = run $(APITESTSDIR)/api-h264-slice-test 2 $(TARGET_SAMPLES)/h264/crew_cif.nal api-h264-slice.h264 + FATE_API_LIBAVFORMAT-$(call DEMDEC, FLV, FLV) += fate-api-seek fate-api-seek: $(APITESTSDIR)/api-seek-test$(EXESUF) fate-lavf-flv_fmt fate-api-seek: CMD = run $(APITESTSDIR)/api-seek-test $(TARGET_PATH)/tests/data/lavf/lavf.flv 0 720 diff --git a/tests/fate/h264.mak b/tests/fate/h264.mak index 1839b9b44eb0b..f14b46c6e05a4 100644 --- a/tests/fate/h264.mak +++ b/tests/fate/h264.mak @@ -196,6 +196,7 @@ FATE_H264 := $(FATE_H264:%=fate-h264-conformance-%) \ fate-h264-3386 \ fate-h264-missing-frame \ fate-h264-ref-pic-mod-overflow \ + fate-h264-timecode FATE_H264-$(call DEMDEC, H264, H264) += $(FATE_H264) FATE_H264-$(call DEMDEC, MOV, H264) += fate-h264-crop-to-container @@ -440,6 +441,7 @@ fate-h264-twofields-packet: CMD = framecrc -i $(TARGET_SAM fate-h264-unescaped-extradata: CMD = framecrc -i $(TARGET_SAMPLES)/h264/unescaped_extradata.mp4 -an -frames 10 fate-h264-3386: CMD = framecrc -i $(TARGET_SAMPLES)/h264/bbc2.sample.h264 fate-h264-missing-frame: CMD = framecrc -i $(TARGET_SAMPLES)/h264/nondeterministic_cut.h264 +fate-h264-timecode: CMD = framecrc -i $(TARGET_SAMPLES)/h264/crew_cif_timecode-2.h264 fate-h264-reinit-%: CMD = framecrc -i $(TARGET_SAMPLES)/h264/$(@:fate-h264-%=%).h264 -vf format=yuv444p10le,scale=w=352:h=288 diff --git a/tests/ref/fate/api-h264-slice b/tests/ref/fate/api-h264-slice new file mode 100644 index 0000000000000..1d463d30ac021 --- /dev/null +++ b/tests/ref/fate/api-h264-slice @@ -0,0 +1,309 @@ +#format: frame checksums +#version: 2 +#hash: MD5 +#tb 0: 1/30 +#media_type 0: video +#codec_id 0: rawvideo +#dimensions 0: 352x288 +#sar 0: 128/117 +#stream#, dts, pts, duration, size, hash +0, 0, 0, 1, 152064, 28a2f99d62b553403fcffc1f680d5403 +0, 1, 1, 1, 152064, cd95f40841e08160ace0d64506f8adbf +0, 2, 2, 1, 152064, 32f37a1b3ddc2b8b0f6283f0c403a976 +0, 3, 3, 1, 152064, 
643c0b0702072038578ef5ae2000c1a0 +0, 4, 4, 1, 152064, 8d9c9660705f7533e7f49f11693aedf9 +0, 5, 5, 1, 152064, 66a794f8a116c055451091e0e4cd911e +0, 6, 6, 1, 152064, 8ad529648796ae6da279de0b7ca34f72 +0, 7, 7, 1, 152064, 898ad4170eb740d713de254eb4bfe255 +0, 8, 8, 1, 152064, f96cfc1f00df10003e144529a5fae6c6 +0, 9, 9, 1, 152064, 0351a3b68dc87ba5963624e1461788da +0, 10, 10, 1, 152064, 6718e4086a0039584bcc37dcd4be6a67 +0, 11, 11, 1, 152064, fb4fec78d9434b9579b31f8ad0472762 +0, 12, 12, 1, 152064, ec2dcc547d84e15383dcee8462bb9d0c +0, 13, 13, 1, 152064, ea62711bf59b4d1d56cb9dbcb68a8eda +0, 14, 14, 1, 152064, 75b1cb899a9d9e695106f187c20b91f8 +0, 15, 15, 1, 152064, 44a13e4235c2ed3692af5ef698efe4d3 +0, 16, 16, 1, 152064, 6d5f1249d96573782fa95e9228d1ad0a +0, 17, 17, 1, 152064, fce8503dd9472fc7932ffbe21425d45a +0, 18, 18, 1, 152064, e93489a6b4c38d611493a3721aa994d7 +0, 19, 19, 1, 152064, 04580677a663ddba1b747c2ab0d598e6 +0, 20, 20, 1, 152064, a28cceb666c92eaecc3da3e092b83715 +0, 21, 21, 1, 152064, ba9ce0f84fb16c27453666265ad54e35 +0, 22, 22, 1, 152064, 946e014822ab2b45c52d3e08e7db97c2 +0, 23, 23, 1, 152064, b7a40ebb6ac72b322ccfca2568fa521a +0, 24, 24, 1, 152064, cb5a0564af00a00496950ad705a160ce +0, 25, 25, 1, 152064, dbad9e8e79c04b1df497884ec28e3a59 +0, 26, 26, 1, 152064, 3c748cdc0e6ec79ca72482e22f0c3ef8 +0, 27, 27, 1, 152064, 1740911da2ebbdc729cbbea0df466c44 +0, 28, 28, 1, 152064, 3b322e03fcc16d6a0dea651634ce0b40 +0, 29, 29, 1, 152064, a7fca405425015b85cb58ec1aece575b +0, 30, 30, 1, 152064, 2004eec0c923f1855b4b918e6dcb5e02 +0, 31, 31, 1, 152064, 20542f58f1622f58f3cbf4b2bf0b772f +0, 32, 32, 1, 152064, 8872f5cb900ed8d317f3abe50a147934 +0, 33, 33, 1, 152064, 774ba43dc0cd7932099e3e0633e25721 +0, 34, 34, 1, 152064, 4f2d9b7e6d115bd103ccd9945f85582f +0, 35, 35, 1, 152064, f53542ca7f6d5ec462770ffff3f4bfd0 +0, 36, 36, 1, 152064, 43b5f8c4e6dc3dc1acc903687bc90293 +0, 37, 37, 1, 152064, aa7d265ab285ded777a970debe6a08d5 +0, 38, 38, 1, 152064, 818ae082b3dd9557e04710d7cfd700be +0, 39, 39, 1, 
152064, 44cfe472ccedf8a44d0b90c97932caae +0, 40, 40, 1, 152064, 5d2756c81c90bb10484e2e892fce0e0c +0, 41, 41, 1, 152064, c1b254a4b66dc9d769e10316976f5538 +0, 42, 42, 1, 152064, 03808a3f7b01293dbe6089b33e3dc103 +0, 43, 43, 1, 152064, 8c689a6143a8a89415d2e645bb0fe925 +0, 44, 44, 1, 152064, 24268cac7d78eefd8e247ec599e60b4f +0, 45, 45, 1, 152064, ac3195c57a3ebe3871992acfc3182e2f +0, 46, 46, 1, 152064, 8730a99fb5a2573475f61d4e7998ba44 +0, 47, 47, 1, 152064, 651042c34273096db82879fcdd91310d +0, 48, 48, 1, 152064, 972b47241098a9b6471ba6c8ccc2b83b +0, 49, 49, 1, 152064, e8e53022355e6bf7ac50d53bcb1bdc92 +0, 50, 50, 1, 152064, e5b003b04a88e0d60d446eb5b600cc66 +0, 51, 51, 1, 152064, 1c2317c071a33b0b465bbcea04411ddf +0, 52, 52, 1, 152064, 7bd53f4e852370aaeb6b0d042d24b94a +0, 53, 53, 1, 152064, 6ef30966b0d9c0d92b2350e2f45197a8 +0, 54, 54, 1, 152064, 354e2afff0d056193d6c4c2667c638d3 +0, 55, 55, 1, 152064, e67767e97f44c3ef80ea4acee41af0ff +0, 56, 56, 1, 152064, 32589395bf7d07c1c6df644d43b5bbba +0, 57, 57, 1, 152064, 2f2b56210b87142fa3620cce5c56af02 +0, 58, 58, 1, 152064, 599781773ace555c82ac591cc2acf8ec +0, 59, 59, 1, 152064, 2465cf6313dab6bda9171993ff6168de +0, 60, 60, 1, 152064, 54cc6c8a9b3fd95b9700d319c4a69297 +0, 61, 61, 1, 152064, 9e813429ebf7ee4e11fcd4976974fea0 +0, 62, 62, 1, 152064, fac0303897b4d1bd1a202fe0a7d1c6f7 +0, 63, 63, 1, 152064, accb382b99f2d27cefbc9f7ea315f80c +0, 64, 64, 1, 152064, b88711feaee9f7f84da34028b7e2cc81 +0, 65, 65, 1, 152064, 80549aebdcc5629dfc3bee8112536bac +0, 66, 66, 1, 152064, 9c63aa480b5d9937d839b809ba67eee2 +0, 67, 67, 1, 152064, 5e5a729c45a995ba2a97083fca69c9e9 +0, 68, 68, 1, 152064, 6b59c5d4460d78fa337b94b080e72215 +0, 69, 69, 1, 152064, 166d675f774f4f74dbce7e2728afd16c +0, 70, 70, 1, 152064, 3051629ff9281ea8879bce0ed62c1e71 +0, 71, 71, 1, 152064, c2ff4493434ca4fea45c724c10bcbe55 +0, 72, 72, 1, 152064, 907274f16ebeb7c09de8bc124c1fe586 +0, 73, 73, 1, 152064, 88efefc9fc00d88fbce20f4d74a55d25 +0, 74, 74, 1, 152064, 
357bdbcb828c088b748022df9e47b9c7 +0, 75, 75, 1, 152064, 4550087923c1d4195ab536899a99d429 +0, 76, 76, 1, 152064, 2b07777d9109577eaedc87321dd3ff69 +0, 77, 77, 1, 152064, 4a377c552c62cba06d5285aa8478a540 +0, 78, 78, 1, 152064, a0e893c028e106c2394f0578f00ae88a +0, 79, 79, 1, 152064, c0db0e2ee3768d2e4e71581a2be30707 +0, 80, 80, 1, 152064, 954e0cd38b00ea2181b8c322511536f0 +0, 81, 81, 1, 152064, 53ff687670a2490ac3f94405014251b8 +0, 82, 82, 1, 152064, 64a588b7adbc560ec4ad6823c37e41c6 +0, 83, 83, 1, 152064, 60d65e899976214cd3b2b5abb10f2b83 +0, 84, 84, 1, 152064, 1bf0efef4b204a72d05b21c18a569585 +0, 85, 85, 1, 152064, 65d814be6b698ab1185f082c9c9d7de3 +0, 86, 86, 1, 152064, de26e7e663aaeb642e0a94776c5bf22c +0, 87, 87, 1, 152064, f7b0b259ccf21e59fcacd95945a013c3 +0, 88, 88, 1, 152064, 7f3185bb4dc3368733bd29c9aa9e08eb +0, 89, 89, 1, 152064, 3cedc14798d145fcdc8b8a3082de3b88 +0, 90, 90, 1, 152064, c5792622ca4a04d21e1f2c2b2ff692fe +0, 91, 91, 1, 152064, d13199fa94e53643902a8a26e33c9862 +0, 92, 92, 1, 152064, b380359e836896d7698a8dadfe6d6fdc +0, 93, 93, 1, 152064, 4c7c5f1f093f7bcaddf06068b9b1d2e5 +0, 94, 94, 1, 152064, 73f33e1eedea9aa5e6c3f2b636dd2c23 +0, 95, 95, 1, 152064, f0d2aad6477ace945f87b25cb44f3ff0 +0, 96, 96, 1, 152064, d90bddd7c2279bbd0266a26915202712 +0, 97, 97, 1, 152064, 0b377a48dea8fc2702395796808af63f +0, 98, 98, 1, 152064, ea0099179e806a9680f019446e39d125 +0, 99, 99, 1, 152064, a77dd3069c54b255e45b261f31be80d2 +0, 100, 100, 1, 152064, d362dcbe415329e713ff6ef9e6447d87 +0, 101, 101, 1, 152064, 15441bcb307ac24766ceba8db42f9413 +0, 102, 102, 1, 152064, 79b953e72d11d3fa6d6974e4b8b13392 +0, 103, 103, 1, 152064, ec8c35c829fac56ca8ae2f0160ae5d7f +0, 104, 104, 1, 152064, c104f8f1d17629b0449f4a2af2e40f73 +0, 105, 105, 1, 152064, 4661c4b3c2b1a03a8e23e7e88e974f22 +0, 106, 106, 1, 152064, 7cb48bae9841f67294b2e25a73d46a8e +0, 107, 107, 1, 152064, bddcb2c64a4257760f50714ec8c49243 +0, 108, 108, 1, 152064, c2123750802357c25c352f09bd1b1de2 +0, 109, 109, 1, 152064, 
6eb5af4f3ad69cc88e0c08f6aa9bb034 +0, 110, 110, 1, 152064, 063991a633a051d6889f0fff41059e5f +0, 111, 111, 1, 152064, fa736839a01ad04fe08d437c7fa60a2d +0, 112, 112, 1, 152064, 85a43397c5a1defe15b61464c8d1457a +0, 113, 113, 1, 152064, da50c437613be59486012b69c7953f63 +0, 114, 114, 1, 152064, eb32e24757a98192928324d3a389a3dc +0, 115, 115, 1, 152064, 1bf511fb8245e3be71ebefdcf506575d +0, 116, 116, 1, 152064, 4479c195c4cd4111afe561a07c0f626d +0, 117, 117, 1, 152064, 0b1815f0c28bb55aae515a5dc3a34f3b +0, 118, 118, 1, 152064, 300d3c32442bd554384b3c804dd519ad +0, 119, 119, 1, 152064, 197df868e0488b8b12c0b42d8c4b2aec +0, 120, 120, 1, 152064, 03bce34c3214e0144a0928b9b9acc8e8 +0, 121, 121, 1, 152064, ba73a879b8fca5db4a354075b26ccb6a +0, 122, 122, 1, 152064, b1c34c6d2535bf1e7af3a6936d1627df +0, 123, 123, 1, 152064, 77d162995974428c5c7766ee5627eac1 +0, 124, 124, 1, 152064, fa4c70aa68850bcae2579046557c0b5f +0, 125, 125, 1, 152064, 63ce618e67f380000030c97db78ac4ae +0, 126, 126, 1, 152064, 7e32538d501127faf058792e83fbbe43 +0, 127, 127, 1, 152064, 61bc1d685553a97a7c3b0cbb3790faad +0, 128, 128, 1, 152064, 57f3b97e4a80ded30b9e8f12cfc8ff44 +0, 129, 129, 1, 152064, 31db51a64307ca6f1db866a01befa301 +0, 130, 130, 1, 152064, 59924d342068caf1ad7329b947279e2d +0, 131, 131, 1, 152064, 2f0f9dd3056cac40c17684bcccdf267d +0, 132, 132, 1, 152064, b00df17142f99bdc077cb2e4c5c8b747 +0, 133, 133, 1, 152064, e7c40734dea5433038b975666be7b21e +0, 134, 134, 1, 152064, 51d77965d3a9d431a2c409244c9bc465 +0, 135, 135, 1, 152064, 15b54bdc5e2098fe7c01ce689babe08b +0, 136, 136, 1, 152064, 3fa3284ae3f714ea197ad614bff7c5c5 +0, 137, 137, 1, 152064, c6512a19b7b1b29c05c7b825b41ab930 +0, 138, 138, 1, 152064, b13c8bc436186d47595dc69186f1f927 +0, 139, 139, 1, 152064, d5eff490784883a93dd3aaea08c46d5b +0, 140, 140, 1, 152064, a005ac77851ea3a44e261d9067ee835f +0, 141, 141, 1, 152064, 6706b74dc10c72f27e9f6341710e56ac +0, 142, 142, 1, 152064, 46479f86f53f55d2094354eb9bed78df +0, 143, 143, 1, 152064, 
17f5cd040eb766ece29d1c1e283e9c20 +0, 144, 144, 1, 152064, 4f34c43eeeac2c751aac486ba42d9b9a +0, 145, 145, 1, 152064, 24c16b9d01c316305686af1a12f7be49 +0, 146, 146, 1, 152064, 9ae9b1f109fa3d02f226fefdaf395be6 +0, 147, 147, 1, 152064, eb98c1c6e473d8b53069206ffc69a9cb +0, 148, 148, 1, 152064, f0768d9cb981d277b70d1b3b928f4915 +0, 149, 149, 1, 152064, c1a5cef2bdb3f3b932a051c29d31f889 +0, 150, 150, 1, 152064, 8f75fb3a6f994b90999f8b0c664ad7c4 +0, 151, 151, 1, 152064, 3a778c9c86afaf03f2e60668d849e25b +0, 152, 152, 1, 152064, 4c3dd11965a2cf55790088a99289981a +0, 153, 153, 1, 152064, 763f810845e6f4e798a6edb6633f5506 +0, 154, 154, 1, 152064, 6b305b9d79151c1644c924d522091eea +0, 155, 155, 1, 152064, e981ce0e01f24eca2e89c7c81480fb07 +0, 156, 156, 1, 152064, 91349f36d44383dc1cd72f0a3f9c76ed +0, 157, 157, 1, 152064, 9a67f029ed2370983ff3e24d8c2c65d2 +0, 158, 158, 1, 152064, cf5717cb593fbafad6abf8bdb7ca2737 +0, 159, 159, 1, 152064, 7ece8c2497ca72e4f8e9eb69048391f8 +0, 160, 160, 1, 152064, 9dccce22ca32a7ec8890f77e4de1fa42 +0, 161, 161, 1, 152064, f418dc75e266c47ba84275741f0635cb +0, 162, 162, 1, 152064, aeddab213baab78ed0c44abb7409e291 +0, 163, 163, 1, 152064, a0b5e3c0616105580a310529ed71d072 +0, 164, 164, 1, 152064, e0e96da8724b472868634b6b145ebb2e +0, 165, 165, 1, 152064, bdaaf9623f5d329c8706e4850db0beea +0, 166, 166, 1, 152064, 6566ddd82da9096458e039caa7d56674 +0, 167, 167, 1, 152064, b882cb5f1c6059d338273e8fdb18e41e +0, 168, 168, 1, 152064, f9723e59ce02828e64c16d32216441b2 +0, 169, 169, 1, 152064, 98b5a843bf125eeae0240bde40016d6a +0, 170, 170, 1, 152064, 8958b81f8a028928c4b9a7024a4eebff +0, 171, 171, 1, 152064, 25a8acfdd14a472a8090d41626472070 +0, 172, 172, 1, 152064, 6faf859c0b264b6d76e0823c6045cebd +0, 173, 173, 1, 152064, 0774a3470360c37ede375d19aebe1844 +0, 174, 174, 1, 152064, 5dd921d4f05976fb6bbf5cc6996254e0 +0, 175, 175, 1, 152064, d03d789e3c439420a07e3e919ddd1cf0 +0, 176, 176, 1, 152064, 1fad139023f7d7022f8f65a6e31f68a9 +0, 177, 177, 1, 152064, 
0c706070d649da054eeaf686d2e14a1d +0, 178, 178, 1, 152064, 51e4156b19bdc55e993d1956473827e3 +0, 179, 179, 1, 152064, e447458fd86c022852cedf56dc58f34f +0, 180, 180, 1, 152064, 59732caeb824f052044b4434ef564227 +0, 181, 181, 1, 152064, cf5ccf671ddc89e1f430878afb86fced +0, 182, 182, 1, 152064, e3e98f92e4cf8f0ccce27482407ebbf1 +0, 183, 183, 1, 152064, 089d236d04d1918b319524e3002d21c8 +0, 184, 184, 1, 152064, 7063afc35aa2c24b1e3dc781bb612af1 +0, 185, 185, 1, 152064, 902e5153028215ac60bf0f998673e3ca +0, 186, 186, 1, 152064, 2360fb2ed2b0e7c37a318fb7f9df7550 +0, 187, 187, 1, 152064, be0788a6a06906f57f7ad1e0e4c0aba7 +0, 188, 188, 1, 152064, db90ee89bbeefcd54b79f022ed9d62d9 +0, 189, 189, 1, 152064, 7237b5c1e6f182805d4e324e636f2a45 +0, 190, 190, 1, 152064, e5da5c0643e457087f54935cfa50f7c0 +0, 191, 191, 1, 152064, 89b5d462accdc4cfaed1e57de4589f39 +0, 192, 192, 1, 152064, b670710e2f897f20d83c42bcd0ee7d85 +0, 193, 193, 1, 152064, 9c7ceba12895f2a670e4a1498d28951c +0, 194, 194, 1, 152064, 4b426b0719a67bc228e1928e83b47b53 +0, 195, 195, 1, 152064, b2c646cd4d3b100619fd6e626ea8b3cb +0, 196, 196, 1, 152064, ad9abc825e1b87ec0defb1df032364e6 +0, 197, 197, 1, 152064, 21423e23c708f43a9d615bc2bc700d97 +0, 198, 198, 1, 152064, 14a42211968cd4b8416ebc0285eb02b3 +0, 199, 199, 1, 152064, a45eb0c4f6a9c5beeb90a292be71461e +0, 200, 200, 1, 152064, f9bfba991f0a0ea6bbfdde5d23bd8785 +0, 201, 201, 1, 152064, 49d33752288ddef86dc702652f688c75 +0, 202, 202, 1, 152064, 97b50290b4a1e2f31c704cc302fe89d8 +0, 203, 203, 1, 152064, c3006dcc89d2f45379c840c7dd5f7559 +0, 204, 204, 1, 152064, 4a861c22e63478ffe73571909da9a15f +0, 205, 205, 1, 152064, e7a8bff496059d3cd40470920fb26c75 +0, 206, 206, 1, 152064, 989d818e0d7d8eea14da209c37ad3e0b +0, 207, 207, 1, 152064, 1732c746805ca221c85fb5570911378d +0, 208, 208, 1, 152064, 60ece5f795f5756bef34ba84fb6fec2a +0, 209, 209, 1, 152064, 9fd355648ef40dd0e15c81554b111637 +0, 210, 210, 1, 152064, 2a3b9220b98ea648e395ab9ea12023d2 +0, 211, 211, 1, 152064, 
eea2a06e68196917ba2a754563953cd5 +0, 212, 212, 1, 152064, 3c2ec831a9802a968654df1bee81ca40 +0, 213, 213, 1, 152064, 590abeedce1cfa9df8a00d7ab9cf2c8e +0, 214, 214, 1, 152064, bc07f89391568a78854f99ad9fd62c49 +0, 215, 215, 1, 152064, 0bd866450376be96a85690595d96d909 +0, 216, 216, 1, 152064, 33483531a4d760bdc30a77d5de49aff7 +0, 217, 217, 1, 152064, b0294c6e784fa3f15532331063c5036f +0, 218, 218, 1, 152064, f4f3ba2781b2a9be3c2dd5b4c445e0d9 +0, 219, 219, 1, 152064, 8550626512e0602a1c53bfb8c51427d8 +0, 220, 220, 1, 152064, 0c2d0229196825910e5f487c33b45ef3 +0, 221, 221, 1, 152064, 93dbbed468f0012b921aa0b2b6751a70 +0, 222, 222, 1, 152064, 2f0d99dc6d4b5c65bc18946b1e6cdc4c +0, 223, 223, 1, 152064, fb25cbe655fc050bbcbfe9cc3fa06ffe +0, 224, 224, 1, 152064, 376d3f894957b3bac2308f2662ad5c82 +0, 225, 225, 1, 152064, 46b5c54ea38987b9e3d371a64d06300d +0, 226, 226, 1, 152064, 9bd24bc1a94aed633ff63aac5b720269 +0, 227, 227, 1, 152064, df0bb3f7724048f67c4a60a1dbb3d5e6 +0, 228, 228, 1, 152064, a9d1c8b8007ea61c0ab2f97b3cfc2aea +0, 229, 229, 1, 152064, fd5a4ccab51773b09edca30e665999e8 +0, 230, 230, 1, 152064, 0eaf8218244c9b2e78660cf923755557 +0, 231, 231, 1, 152064, 40f4fc64016fd148b14aea2da7652125 +0, 232, 232, 1, 152064, 6f075b312e9f7e1b4c3343441a9e1f7f +0, 233, 233, 1, 152064, 93f7523632abfe91fa701208aafdc29a +0, 234, 234, 1, 152064, 3c3ea7aa12a89df2309b76c22053b0ff +0, 235, 235, 1, 152064, 2181a1aec4278efa70dec025878d88c0 +0, 236, 236, 1, 152064, 35dffda6543fdf43ad182484564abda8 +0, 237, 237, 1, 152064, bf2b65551a8fcf3b1b4185e0ebfca2a7 +0, 238, 238, 1, 152064, 49fd2dd18ddbb7f005c3705910bff99f +0, 239, 239, 1, 152064, 9f6826599ebd45a1159e46d293fc8f7b +0, 240, 240, 1, 152064, 5b88b8ec1da51a165e2741f8a6b710ad +0, 241, 241, 1, 152064, a81229c0d464cc8d376f8b0153b50fc2 +0, 242, 242, 1, 152064, 07ef482c1c9967700a6cef5cdd010384 +0, 243, 243, 1, 152064, d4ebe4de6e096f7cccd5ae2be856e983 +0, 244, 244, 1, 152064, 6daf25ffb2c2baf02e483e84733fc37b +0, 245, 245, 1, 152064, 
d52f485c747e945bfe34aeeaaec4fe78 +0, 246, 246, 1, 152064, 408e5b502af7a10454af6f388e2722be +0, 247, 247, 1, 152064, 684d285dc9c08791ce16e02a1f65e22b +0, 248, 248, 1, 152064, 5de9b8f8678c6b7a1ff04f217ef8c0c3 +0, 249, 249, 1, 152064, b60f9e37dcfc3924adcfc96d08fb2656 +0, 250, 250, 1, 152064, 8975d551bb7c01cb520b5694e73d1809 +0, 251, 251, 1, 152064, af55f9897a3fa51eacdcebf3a21f5fe5 +0, 252, 252, 1, 152064, 10c21c5167cba09ce54f361e88e6e3c9 +0, 253, 253, 1, 152064, 8cb92c4a8d32fe00a92c5bd4a163cc45 +0, 254, 254, 1, 152064, 3d39fd1222c8421f0eed3c8249c3d772 +0, 255, 255, 1, 152064, 43c5629af47dc4fd659bffe481e84999 +0, 256, 256, 1, 152064, ad6d5a0f4d2d2738809b7f610f6da823 +0, 257, 257, 1, 152064, d2f0dbca68098d58468e94b84ef0fb8b +0, 258, 258, 1, 152064, 247487ae60500313df92dd0175ac4e0f +0, 259, 259, 1, 152064, cfbbabb4b8c93c87c221f76a155bb0fc +0, 260, 260, 1, 152064, c708254a644abc41788d717dd59b8baf +0, 261, 261, 1, 152064, fa710d87bddd1a65970c5618a8a0158f +0, 262, 262, 1, 152064, 31210937c8a67c6aafda2e03226b9770 +0, 263, 263, 1, 152064, ac518a56fc537de251f3d28d380e25cb +0, 264, 264, 1, 152064, afcb7642c336bcef9b605a40e518d305 +0, 265, 265, 1, 152064, 15fd29e16aaebae6f74e49455631c1f8 +0, 266, 266, 1, 152064, 938b90999b05595e9875c6d4f9836407 +0, 267, 267, 1, 152064, 2fe744b939902a5f4bb69e9243c55d08 +0, 268, 268, 1, 152064, a902057edac1638a1cd218fe5b88bfc2 +0, 269, 269, 1, 152064, 78087115b9600b5499866c127d175c0f +0, 270, 270, 1, 152064, 877c729e2d2b599dd6cac1f59f12e068 +0, 271, 271, 1, 152064, 77e6b4b761902fbe27fb0ff9eb6d02ac +0, 272, 272, 1, 152064, dd3ee373cb4935eca46947aedda3b991 +0, 273, 273, 1, 152064, b3ee6b4a18f6d20f9b9fd8dc9e8af90e +0, 274, 274, 1, 152064, 492afb7421667468fa95017c693ec47b +0, 275, 275, 1, 152064, 9abb912d8101de895b8f482c199934c2 +0, 276, 276, 1, 152064, 08ca372dfb5e90382f1b58345a0e51b1 +0, 277, 277, 1, 152064, 805559cb3f3385e7865df692336dba29 +0, 278, 278, 1, 152064, c5cc85e4d44010e048fd2013535d7180 +0, 279, 279, 1, 152064, 
ef9a05a7a4e0b5beff9a8119af44ebc7 +0, 280, 280, 1, 152064, e6983be0a0c1705cfede1e7476aad381 +0, 281, 281, 1, 152064, a4bb0c3d4deb17784b07d3713db05302 +0, 282, 282, 1, 152064, 0fd5bb9259e8c27aba7670b08cd9a26b +0, 283, 283, 1, 152064, 43d6df9fd672b13e2c59db924e9fe30b +0, 284, 284, 1, 152064, 3aaf3b87705c46495c9d1b9f4ea706bf +0, 285, 285, 1, 152064, 0d2ba631f5c716d9c5e5b2a75d3b6433 +0, 286, 286, 1, 152064, bf29cc016dce85e621aaa7647fae1544 +0, 287, 287, 1, 152064, 3374284a808d79e9be32bf3610b0fd17 +0, 288, 288, 1, 152064, ea3f305e76009f3bf2cd5014d339eafa +0, 289, 289, 1, 152064, 95ce7320a841a71b5a8871cef385ce41 +0, 290, 290, 1, 152064, 88613d96dbda681edab4ed41c3f08536 +0, 291, 291, 1, 152064, b9e9e9045b91c4f7917274088de64a5e +0, 292, 292, 1, 152064, e0b90055449e7403289a8dda9c02add0 +0, 293, 293, 1, 152064, 367ee1603fa7778dad3e99be8db779ee +0, 294, 294, 1, 152064, 6bb0eaa6140d673b452eee6ac6c262c2 +0, 295, 295, 1, 152064, 9af4ef919ae61e1597db1b9acd6af95a +0, 296, 296, 1, 152064, e8f29872e86e54ac26b5fb0a20f10d3e +0, 297, 297, 1, 152064, 09aaad95cd7d173bfe609b79440cbfc8 +0, 298, 298, 1, 152064, c03abe502be10f76e33d93e1c40cc674 +0, 299, 299, 1, 152064, 3e7e315be8aef281714a63f4cf086085 diff --git a/tests/ref/fate/filter-pixdesc-grayf32be b/tests/ref/fate/filter-pixdesc-grayf32be index 423bbfbebc76d..171475483adac 100644 --- a/tests/ref/fate/filter-pixdesc-grayf32be +++ b/tests/ref/fate/filter-pixdesc-grayf32be @@ -1 +1 @@ -pixdesc-grayf32be 381c8d0f19d286809b91cd6e6c0048ab +pixdesc-grayf32be 9b23c74e8e8ffae5d7c7e82bbf5929da diff --git a/tests/ref/fate/filter-pixdesc-grayf32le b/tests/ref/fate/filter-pixdesc-grayf32le index a76e0a995e231..d598d123b4edf 100644 --- a/tests/ref/fate/filter-pixdesc-grayf32le +++ b/tests/ref/fate/filter-pixdesc-grayf32le @@ -1 +1 @@ -pixdesc-grayf32le 381c8d0f19d286809b91cd6e6c0048ab +pixdesc-grayf32le 291f074a24c44799a1f437d1c55556f1 diff --git a/tests/ref/fate/h264-timecode b/tests/ref/fate/h264-timecode new file mode 100644 index 
0000000000000..b78f700c6d298 --- /dev/null +++ b/tests/ref/fate/h264-timecode @@ -0,0 +1,305 @@ +#tb 0: 1/30 +#media_type 0: video +#codec_id 0: rawvideo +#dimensions 0: 352x288 +#sar 0: 128/117 +0, 0, 0, 1, 152064, 0x70684c80 +0, 1, 1, 1, 152064, 0xb5c8b300 +0, 2, 2, 1, 152064, 0x5777ac60 +0, 3, 3, 1, 152064, 0xb27646a5 +0, 4, 4, 1, 152064, 0x20bd98ec +0, 5, 5, 1, 152064, 0xcf5ac1b0 +0, 6, 6, 1, 152064, 0x85a42952 +0, 7, 7, 1, 152064, 0xc25aa530 +0, 8, 8, 1, 152064, 0x97b14be9 +0, 9, 9, 1, 152064, 0xf67ec91a +0, 10, 10, 1, 152064, 0x3890d6a3 +0, 11, 11, 1, 152064, 0xc52c8467 +0, 12, 12, 1, 152064, 0x30a7af36 +0, 13, 13, 1, 152064, 0x27528a98 +0, 14, 14, 1, 152064, 0x245c08c5 +0, 15, 15, 1, 152064, 0x7e0220f3 +0, 16, 16, 1, 152064, 0x4b254c89 +0, 17, 17, 1, 152064, 0x1586e3e5 +0, 18, 18, 1, 152064, 0x594dfc58 +0, 19, 19, 1, 152064, 0x85ba9c8e +0, 20, 20, 1, 152064, 0x1e235100 +0, 21, 21, 1, 152064, 0xa02c6a72 +0, 22, 22, 1, 152064, 0xd1166fb6 +0, 23, 23, 1, 152064, 0xcc9b1546 +0, 24, 24, 1, 152064, 0x55e35a35 +0, 25, 25, 1, 152064, 0xea63e2ae +0, 26, 26, 1, 152064, 0x936a1802 +0, 27, 27, 1, 152064, 0x354a749c +0, 28, 28, 1, 152064, 0x5cd0f246 +0, 29, 29, 1, 152064, 0x0376e69b +0, 30, 30, 1, 152064, 0x5af5fb61 +0, 31, 31, 1, 152064, 0x9a053ab8 +0, 32, 32, 1, 152064, 0x57cbbfcc +0, 33, 33, 1, 152064, 0x81f19e93 +0, 34, 34, 1, 152064, 0x0812953d +0, 35, 35, 1, 152064, 0x0ae2a166 +0, 36, 36, 1, 152064, 0x193125b8 +0, 37, 37, 1, 152064, 0xab7eca7b +0, 38, 38, 1, 152064, 0x91ff1870 +0, 39, 39, 1, 152064, 0x8f522dde +0, 40, 40, 1, 152064, 0x98faab46 +0, 41, 41, 1, 152064, 0xa2119231 +0, 42, 42, 1, 152064, 0xfe591321 +0, 43, 43, 1, 152064, 0x6c8a1bf5 +0, 44, 44, 1, 152064, 0x857c925c +0, 45, 45, 1, 152064, 0xe81a77f2 +0, 46, 46, 1, 152064, 0x08234e83 +0, 47, 47, 1, 152064, 0x76cb39f6 +0, 48, 48, 1, 152064, 0x26168d25 +0, 49, 49, 1, 152064, 0x4dd3b273 +0, 50, 50, 1, 152064, 0xd6e8398e +0, 51, 51, 1, 152064, 0x55986a57 +0, 52, 52, 1, 152064, 0x9c2768fb +0, 53, 53, 1, 152064, 
0x03517efe +0, 54, 54, 1, 152064, 0x3a48451f +0, 55, 55, 1, 152064, 0x1f6d6b87 +0, 56, 56, 1, 152064, 0x0917fb2a +0, 57, 57, 1, 152064, 0x0f49e7a9 +0, 58, 58, 1, 152064, 0x3c56d4e1 +0, 59, 59, 1, 152064, 0x487cca35 +0, 60, 60, 1, 152064, 0x5c6b8b1c +0, 61, 61, 1, 152064, 0x767d8a34 +0, 62, 62, 1, 152064, 0xcd8d692a +0, 63, 63, 1, 152064, 0x788b3ebf +0, 64, 64, 1, 152064, 0x4cae3852 +0, 65, 65, 1, 152064, 0x1150f0aa +0, 66, 66, 1, 152064, 0x9d4b3366 +0, 67, 67, 1, 152064, 0xedcb8863 +0, 68, 68, 1, 152064, 0x2c09ca8c +0, 69, 69, 1, 152064, 0x20930842 +0, 70, 70, 1, 152064, 0xd653b16f +0, 71, 71, 1, 152064, 0x41f38d77 +0, 72, 72, 1, 152064, 0xa5f69360 +0, 73, 73, 1, 152064, 0xf0f5ce27 +0, 74, 74, 1, 152064, 0xf2a6246c +0, 75, 75, 1, 152064, 0x7e76fabc +0, 76, 76, 1, 152064, 0xf76e1982 +0, 77, 77, 1, 152064, 0x40c1be5a +0, 78, 78, 1, 152064, 0x132ca50e +0, 79, 79, 1, 152064, 0xae0c69ed +0, 80, 80, 1, 152064, 0x5f775778 +0, 81, 81, 1, 152064, 0x62bb9790 +0, 82, 82, 1, 152064, 0x8b448e83 +0, 83, 83, 1, 152064, 0xcc35d9fe +0, 84, 84, 1, 152064, 0x51560127 +0, 85, 85, 1, 152064, 0xb915829b +0, 86, 86, 1, 152064, 0x3a3f2b0c +0, 87, 87, 1, 152064, 0x4e2d2260 +0, 88, 88, 1, 152064, 0x9fdb7567 +0, 89, 89, 1, 152064, 0xe34b2f4e +0, 90, 90, 1, 152064, 0x8650ec13 +0, 91, 91, 1, 152064, 0xdff3e299 +0, 92, 92, 1, 152064, 0x100f8f0c +0, 93, 93, 1, 152064, 0xa9aff101 +0, 94, 94, 1, 152064, 0xa80add4c +0, 95, 95, 1, 152064, 0xa7994880 +0, 96, 96, 1, 152064, 0xc74ecb79 +0, 97, 97, 1, 152064, 0xbada663d +0, 98, 98, 1, 152064, 0xff7f0592 +0, 99, 99, 1, 152064, 0x44731be5 +0, 100, 100, 1, 152064, 0x1a61f9ac +0, 101, 101, 1, 152064, 0x848ace19 +0, 102, 102, 1, 152064, 0x22858567 +0, 103, 103, 1, 152064, 0x2b3a9ba7 +0, 104, 104, 1, 152064, 0x02889774 +0, 105, 105, 1, 152064, 0x29a54516 +0, 106, 106, 1, 152064, 0x737f2833 +0, 107, 107, 1, 152064, 0x28b5a183 +0, 108, 108, 1, 152064, 0xaff9112a +0, 109, 109, 1, 152064, 0x0a7652b5 +0, 110, 110, 1, 152064, 0x03fa3e91 +0, 111, 111, 1, 152064, 
0x9deade68 +0, 112, 112, 1, 152064, 0xb9af1a27 +0, 113, 113, 1, 152064, 0xe9f07f00 +0, 114, 114, 1, 152064, 0x1b03894a +0, 115, 115, 1, 152064, 0xf89e26c5 +0, 116, 116, 1, 152064, 0x6d6b5508 +0, 117, 117, 1, 152064, 0x735ce75d +0, 118, 118, 1, 152064, 0x30017005 +0, 119, 119, 1, 152064, 0x606ad5ab +0, 120, 120, 1, 152064, 0xb442ac30 +0, 121, 121, 1, 152064, 0xac321998 +0, 122, 122, 1, 152064, 0x4507990b +0, 123, 123, 1, 152064, 0xe40f986d +0, 124, 124, 1, 152064, 0xc9840540 +0, 125, 125, 1, 152064, 0x74cfbc82 +0, 126, 126, 1, 152064, 0x1ac9744b +0, 127, 127, 1, 152064, 0x8ac2a889 +0, 128, 128, 1, 152064, 0x3074a1bc +0, 129, 129, 1, 152064, 0x389ae633 +0, 130, 130, 1, 152064, 0xaadb4325 +0, 131, 131, 1, 152064, 0x7d1a91b5 +0, 132, 132, 1, 152064, 0xaa047ddc +0, 133, 133, 1, 152064, 0xe5cafebc +0, 134, 134, 1, 152064, 0x24314a0c +0, 135, 135, 1, 152064, 0x530cfa1c +0, 136, 136, 1, 152064, 0x3f973f68 +0, 137, 137, 1, 152064, 0xf51d3e20 +0, 138, 138, 1, 152064, 0x24aca84c +0, 139, 139, 1, 152064, 0x96b411e9 +0, 140, 140, 1, 152064, 0x6d046ea3 +0, 141, 141, 1, 152064, 0x9237974f +0, 142, 142, 1, 152064, 0x0a808964 +0, 143, 143, 1, 152064, 0x9d6ad957 +0, 144, 144, 1, 152064, 0x9d6381ea +0, 145, 145, 1, 152064, 0xfeceab64 +0, 146, 146, 1, 152064, 0x7fa00e6f +0, 147, 147, 1, 152064, 0x635ac444 +0, 148, 148, 1, 152064, 0xf0db3036 +0, 149, 149, 1, 152064, 0xc5ddef73 +0, 150, 150, 1, 152064, 0x7fea7516 +0, 151, 151, 1, 152064, 0x7f3f7460 +0, 152, 152, 1, 152064, 0x446dfa20 +0, 153, 153, 1, 152064, 0x5d7167c4 +0, 154, 154, 1, 152064, 0xf9da05b7 +0, 155, 155, 1, 152064, 0xc007383d +0, 156, 156, 1, 152064, 0xbf461f08 +0, 157, 157, 1, 152064, 0xf722508f +0, 158, 158, 1, 152064, 0x2699fa56 +0, 159, 159, 1, 152064, 0xa49ca6d8 +0, 160, 160, 1, 152064, 0x58f70dfd +0, 161, 161, 1, 152064, 0x391383db +0, 162, 162, 1, 152064, 0xb859f2fd +0, 163, 163, 1, 152064, 0xbb77d0a7 +0, 164, 164, 1, 152064, 0xd4c9881d +0, 165, 165, 1, 152064, 0xb46d7272 +0, 166, 166, 1, 152064, 0x78237e5e +0, 167, 
167, 1, 152064, 0xbcd9f633 +0, 168, 168, 1, 152064, 0x17e09080 +0, 169, 169, 1, 152064, 0x4a9bdacf +0, 170, 170, 1, 152064, 0x600c972f +0, 171, 171, 1, 152064, 0x858e399a +0, 172, 172, 1, 152064, 0xf9ef200d +0, 173, 173, 1, 152064, 0x6aec0fda +0, 174, 174, 1, 152064, 0x4d7ba9a8 +0, 175, 175, 1, 152064, 0x0df5dbdb +0, 176, 176, 1, 152064, 0x77d598f8 +0, 177, 177, 1, 152064, 0x7d78c129 +0, 178, 178, 1, 152064, 0xf6b79ad2 +0, 179, 179, 1, 152064, 0x2b458750 +0, 180, 180, 1, 152064, 0xdbec9727 +0, 181, 181, 1, 152064, 0xcb073a1a +0, 182, 182, 1, 152064, 0xa95e913a +0, 183, 183, 1, 152064, 0x5ca9da6e +0, 184, 184, 1, 152064, 0x82e09caf +0, 185, 185, 1, 152064, 0x319f59c5 +0, 186, 186, 1, 152064, 0x11003b19 +0, 187, 187, 1, 152064, 0xcdfc5077 +0, 188, 188, 1, 152064, 0xa56fc40d +0, 189, 189, 1, 152064, 0x3d2425dc +0, 190, 190, 1, 152064, 0x907f51d3 +0, 191, 191, 1, 152064, 0xc52dc2dc +0, 192, 192, 1, 152064, 0xea800778 +0, 193, 193, 1, 152064, 0xc0b022f9 +0, 194, 194, 1, 152064, 0x106b4ea2 +0, 195, 195, 1, 152064, 0x50c6cbf2 +0, 196, 196, 1, 152064, 0x480711b5 +0, 197, 197, 1, 152064, 0x1954bca7 +0, 198, 198, 1, 152064, 0x7894a1c1 +0, 199, 199, 1, 152064, 0xaa39601a +0, 200, 200, 1, 152064, 0x07652fa2 +0, 201, 201, 1, 152064, 0x84ac1bce +0, 202, 202, 1, 152064, 0x89104737 +0, 203, 203, 1, 152064, 0x832bf2b0 +0, 204, 204, 1, 152064, 0x45fa87f4 +0, 205, 205, 1, 152064, 0xde5b6e82 +0, 206, 206, 1, 152064, 0x8d88f89b +0, 207, 207, 1, 152064, 0xba6488c8 +0, 208, 208, 1, 152064, 0xd9bc3312 +0, 209, 209, 1, 152064, 0xdba30d10 +0, 210, 210, 1, 152064, 0xd208cb34 +0, 211, 211, 1, 152064, 0x0642aadc +0, 212, 212, 1, 152064, 0xf392e67a +0, 213, 213, 1, 152064, 0xec6041d0 +0, 214, 214, 1, 152064, 0x52463e92 +0, 215, 215, 1, 152064, 0x218174a8 +0, 216, 216, 1, 152064, 0x9408f728 +0, 217, 217, 1, 152064, 0xabd31db7 +0, 218, 218, 1, 152064, 0x3e72f003 +0, 219, 219, 1, 152064, 0x638e603b +0, 220, 220, 1, 152064, 0xf1f896c7 +0, 221, 221, 1, 152064, 0x786554ff +0, 222, 222, 1, 152064, 
0x9bb909f5 +0, 223, 223, 1, 152064, 0x726cf59e +0, 224, 224, 1, 152064, 0xc18c15a1 +0, 225, 225, 1, 152064, 0x45ea8f83 +0, 226, 226, 1, 152064, 0xcb88e67a +0, 227, 227, 1, 152064, 0x18d09432 +0, 228, 228, 1, 152064, 0x99d02a0a +0, 229, 229, 1, 152064, 0x7ddc3691 +0, 230, 230, 1, 152064, 0x47710c00 +0, 231, 231, 1, 152064, 0xe28646c7 +0, 232, 232, 1, 152064, 0xe8a2a4e5 +0, 233, 233, 1, 152064, 0xed19f345 +0, 234, 234, 1, 152064, 0xceffaf7f +0, 235, 235, 1, 152064, 0x8d116def +0, 236, 236, 1, 152064, 0xccb68ae8 +0, 237, 237, 1, 152064, 0x3529b3db +0, 238, 238, 1, 152064, 0x529911b8 +0, 239, 239, 1, 152064, 0x3a676438 +0, 240, 240, 1, 152064, 0x18508f5d +0, 241, 241, 1, 152064, 0x4577d18b +0, 242, 242, 1, 152064, 0x420f5881 +0, 243, 243, 1, 152064, 0x60341b86 +0, 244, 244, 1, 152064, 0x2f51de6a +0, 245, 245, 1, 152064, 0xc70bbf8d +0, 246, 246, 1, 152064, 0xc1ff63f7 +0, 247, 247, 1, 152064, 0x2dc1662b +0, 248, 248, 1, 152064, 0x1bbb3b70 +0, 249, 249, 1, 152064, 0x74f44ec2 +0, 250, 250, 1, 152064, 0x9b93084e +0, 251, 251, 1, 152064, 0x1493f82d +0, 252, 252, 1, 152064, 0x069d9869 +0, 253, 253, 1, 152064, 0xc9a4f706 +0, 254, 254, 1, 152064, 0xf80092ed +0, 255, 255, 1, 152064, 0xdc347577 +0, 256, 256, 1, 152064, 0x1df12299 +0, 257, 257, 1, 152064, 0x40d19951 +0, 258, 258, 1, 152064, 0xfb63dbf1 +0, 259, 259, 1, 152064, 0x9153714c +0, 260, 260, 1, 152064, 0x6cfd514c +0, 261, 261, 1, 152064, 0xc0ef7bf3 +0, 262, 262, 1, 152064, 0x5fce6828 +0, 263, 263, 1, 152064, 0xe7d0074d +0, 264, 264, 1, 152064, 0x9e3f7351 +0, 265, 265, 1, 152064, 0x3a0c5d56 +0, 266, 266, 1, 152064, 0xd5581f3c +0, 267, 267, 1, 152064, 0x9a4ec0d1 +0, 268, 268, 1, 152064, 0x150b9a54 +0, 269, 269, 1, 152064, 0x950eb994 +0, 270, 270, 1, 152064, 0xda31e3bf +0, 271, 271, 1, 152064, 0x14ff5d3c +0, 272, 272, 1, 152064, 0xd593bafc +0, 273, 273, 1, 152064, 0xd4cf7c58 +0, 274, 274, 1, 152064, 0x2be70997 +0, 275, 275, 1, 152064, 0xe551703b +0, 276, 276, 1, 152064, 0x7adaf447 +0, 277, 277, 1, 152064, 0x0435ea0f +0, 278, 
278, 1, 152064, 0x87e5bba1 +0, 279, 279, 1, 152064, 0xea1fdf88 +0, 280, 280, 1, 152064, 0xaea5b4c4 +0, 281, 281, 1, 152064, 0x32f79e89 +0, 282, 282, 1, 152064, 0xcd5694bc +0, 283, 283, 1, 152064, 0x6b12830f +0, 284, 284, 1, 152064, 0xaf681652 +0, 285, 285, 1, 152064, 0x3b26e20b +0, 286, 286, 1, 152064, 0x2a9eee33 +0, 287, 287, 1, 152064, 0x8d5fe982 +0, 288, 288, 1, 152064, 0xa4cb5d02 +0, 289, 289, 1, 152064, 0x867dd0b0 +0, 290, 290, 1, 152064, 0x23c885e9 +0, 291, 291, 1, 152064, 0x99fd7b2b +0, 292, 292, 1, 152064, 0xa710e871 +0, 293, 293, 1, 152064, 0x3ecbaaeb +0, 294, 294, 1, 152064, 0x3d1c7de2 +0, 295, 295, 1, 152064, 0x378935f3 +0, 296, 296, 1, 152064, 0xce893553 +0, 297, 297, 1, 152064, 0xa834374c +0, 298, 298, 1, 152064, 0x665094f4 +0, 299, 299, 1, 152064, 0x3fee89c6