From b06d6540284bc1aa8d14fbec62eafad51ed13de1 Mon Sep 17 00:00:00 2001 From: graysky Date: Mon, 29 Jul 2024 17:19:46 -0400 Subject: [PATCH] alarm/ffmpeg-rpi to 4.4.5-1 Tested and verified good with h264 and x265 test clips with vlc-rpi-3.0.21-3 on RPi4B --- ...rpi.patch => 0002-ffmpeg-4.4.5n-rpi.patch} | 11899 ++++++++-------- alarm/ffmpeg-rpi/PKGBUILD | 10 +- 2 files changed, 5711 insertions(+), 6198 deletions(-) rename alarm/ffmpeg-rpi/{0002-ffmpeg-4.4.4n-rpi.patch => 0002-ffmpeg-4.4.5n-rpi.patch} (86%) diff --git a/alarm/ffmpeg-rpi/0002-ffmpeg-4.4.4n-rpi.patch b/alarm/ffmpeg-rpi/0002-ffmpeg-4.4.5n-rpi.patch similarity index 86% rename from alarm/ffmpeg-rpi/0002-ffmpeg-4.4.4n-rpi.patch rename to alarm/ffmpeg-rpi/0002-ffmpeg-4.4.5n-rpi.patch index 3abaa3c99..777e6c1bc 100644 --- a/alarm/ffmpeg-rpi/0002-ffmpeg-4.4.4n-rpi.patch +++ b/alarm/ffmpeg-rpi/0002-ffmpeg-4.4.5n-rpi.patch @@ -1,9 +1,9 @@ -ffmpeg: jc-kynesim/test/4.4.1/main +ffmpeg: jc-kynesim/release/4.4/rpi_import_1 -git diff-index --binary n4.4.4 +git diff-index --binary n4.4.5 -https://github.com/jc-kynesim/rpi-ffmpeg/commit/4185270f334d006a108a878be8a62bab7dce38ee -05-May-2023 +https://github.com/jc-kynesim/rpi-ffmpeg/commit/e86c25c8449b6540e90a31180b15894114eba86d +18-Jan-2023 diff --git a/CREDITS b/CREDITS index f1aea93d6b..e29f0b853c 100644 @@ -19,13 +19,315 @@ index f1aea93d6b..e29f0b853c 100644 -https://git.ffmpeg.org/ffmpeg +http://source.ffmpeg.org. diff --git a/Changelog b/Changelog -index 620ca2bf40..a6508cd8ac 100644 +index 645e5c3b0f..f2d394e29b 100644 --- a/Changelog +++ b/Changelog -@@ -1,267 +1,6 @@ +@@ -1,357 +1,6 @@ Entries are sorted chronologically from oldest to youngest within each release, releases are sorted from youngest to oldest. +-version 4.4.5: +- avcodec/cfhdenc: Height of 16 is not supported +- avcodec/cfhdenc: Allocate more space +- avcodec/vaapi_encode: Check hwctx +- avcodec/proresdec: Consider negative bits left +- avcodec/hevc/hevcdec: Do not allow slices to depend on failed slices +- avutil/slicethread: Check pthread_*_init() for failure +- avutil/frame: Check log2_crop_align +- avutil/buffer: Check ff_mutex_init() for failure +- avformat/xmv: Check this_packet_size +- avformat/ty: rec_size seems to only need 32bit +- avformat/tty: Check avio_size() +- avformat/siff: Basic pkt_size check +- avformat/sauce: Check avio_size() for failure +- avformat/sapdec: Check ffurl_get_file_handle() for error +- avformat/nsvdec: Check asize for PCM +- avformat/mp3dec: Check header_filesize +- avformat/mp3dec; Check for avio_size() failure +- avformat/mov: Use 64bit for str_size +- avformat/mm: Check length +- avformat/hnm: Check *chunk_size +- avformat/hlsenc: Check ret +- avformat/bintext: Check avio_size() return +- avformat/asfdec_o: Check size of index object +- avfilter/vf_scale: Check ff_scale_adjust_dimensions() for failure +- avfilter/scale_eval: Use 64bit, check values in ff_scale_adjust_dimensions() +- avfilter/vf_lut3d: Check av_scanf() +- avfilter/vf_deshake_opencl: Ensure that the first iteration initializes the best variables +- swscale/output: Fix integer overflows in yuv2rgba64_X_c_template +- avformat/mxfdec: Reorder elements of expression in bisect loop +- avcodec/pnmdec: Use 64bit for input size check +- avcodec/utvideoenc: Use unsigned shift to build flags +- avcodec/vc2enc: Fix overflows with storing large values +- avcodec/mpegvideo_enc: Do not duplicate pictures on shifting +- avdevice/dshow_capture: Fix error handling in ff_dshow_##prefix##_Create() +- avcodec/tiff: Check value on 
positive signed targets +- avfilter/vf_bm3d: Dont round MSE2SSE to an integer +- avdevice/dshow: Check device_filter_unique_name before use +- avdevice/dshow_filter: Use wcscpy_s() +- avcodec/flac_parser: Assert that we do not overrun the link_penalty array +- avcodec/pixlet: Simplify pfx computation +- avcodec/motion_est: Fix score squaring overflow +- avcodec/loco: Check loco_get_rice() for failure +- avcodec/loco: check get_ur_golomb_jpegls() for failure +- avcodec/imm4: check cbphi for error +- avcodec/iff: Use signed count +- avcodec/golomb: Assert that k is in the supported range for get_ur/sr_golomb() +- avcodec/golomb: Document return for get_ur_golomb_jpegls() and get_sr_golomb_flac() +- avcodec/dxv: Fix type in get_opcodes() +- avcodec/cri: Check length +- avcodec/xsubdec: Check parse_timecode() +- avutil/imgutils: av_image_check_size2() ensure width and height fit in 32bit +- doc/examples/mux: remove nop +- avcodec/proresenc_kostya: use unsigned alpha for rotation +- avformat/rtmppkt: Simplify and deobfuscate amf_tag_skip() slightly +- avformat/rmdec: use 64bit for audio_framesize checks +- avutil/hwcontext_d3d11va: correct sizeof IDirect3DSurface9 +- avutil/hwcontext_d3d11va: correct sizeof AVD3D11FrameDescriptor +- doc/examples/vaapi_encode: Try to check fwrite() for failure +- avformat/tls_schannel: Initialize ret +- avformat/subfile: Assert that whence is a known case +- avformat/subfile: Merge if into switch() +- avformat/rtsp: Check that lower transport is handled in one of the if() +- avformat/rtsp: initialize reply1 +- avformat/rtsp: use < 0 for error check +- avformat/rtpenc_vc2hq: Check sizes +- avfilter/af_aderivative: Free out on error +- avfilter/af_pan: check nb_output_channels before use +- cbs_av1: Reject thirty-two zero bits in uvlc code +- avfilter/af_mcompand: compute half frequency in double +- tools/coverity: Phase 1 study of anti-halicogenic for coverity av_rescale() +- avfilter/vf_avgblur: Check plane instead of AVFrame +- avformat/rdt: Check pkt_len +- avformat/mpeg: Check len in mpegps_probe() +- avdevice/dshow: Check ICaptureGraphBuilder2_SetFiltergraph() for failure +- avcodec/mfenc: check IMFSample_ConvertToContiguousBuffer() for failure +- avcodec/vc1_loopfilter: Factor duplicate code in vc1_b_h_intfi_loop_filter() +- avformat/img2dec: assert no pipe on ts_from_file +- avcodec/cbs_jpeg: Try to move the read entity to one side in a test +- avformat/mov: Check edit list for overflow +- fftools/ffmpeg: Check read() for failure +- swscale/output: Avoid undefined overflow in yuv2rgb_write_full() +- swscale/output: alpha can become negative after scaling, use multiply +- avcodec/targaenc: Allocate space for the palette +- avcodec/r210enc: Use av_rescale for bitrate +- avcodec/jfdctint_template: Fewer integer anomalies +- avcodec/snowenc: MV limits due to mv_penalty table size +- avformat/mxfdec: Check container_ul->desc before use +- MAINTAINERS: Update the entries for the release maintainer for FFmpeg +- configure: update copyright year +- avfilter/vf_rotate: Check ff_draw_init2() return value +- avformat/matroskadec: Assert that num_levels is non negative +- avformat/libzmq: Check av_strstart() +- avformat/img2dec: Move DQT after unrelated if() +- avdevice/xcbgrab: Check sscanf() return +- fftools/cmdutils: Add protective () to FLAGS +- avformat/sdp: Check before appending "," +- avcodec/ilbcdec: Remove dead code +- avcodec/vp8: Check cond init +- avcodec/vp8: Check mutex init +- avcodec/notchlc: Check init_get_bits8() for failure +- avcodec/tests/dct: 
Use 64bit in intermediate for error computation +- avcodec/scpr3: Check add_dec() for failure +- avcodec/rv34: assert that size is not 0 in rv34_gen_vlc_ext() +- avcodec/wavpackenc: Use unsigned for potential 31bit shift +- avcodec/tests/jpeg2000dwt: Use 64bit in comparission +- avcodec/tests/jpeg2000dwt: Use 64bit in err2 computation +- avformat/fwse: Remove always false expression +- avcodec/sga: Make it clear that the return is intentionally not checked +- avformat/asfdec_f: Use 64bit for preroll computation +- avformat/argo_asf: Use 64bit in offset intermediate +- avformat/ape: Use 64bit for final frame size +- avcodec/tiff: Assert init_get_bits8() success in unpack_gray() +- avcodec/tiff: Assert init_get_bits8() success in horizontal_fill() +- swscale/yuv2rgb: Use 64bit for brightness computation +- avutil/tests/opt: Check av_set_options_string() for failure +- avutil/tests/dict: Check av_dict_set() before get for failure +- avdevice/dshow: fix badly indented line +- avcodec/mscc & mwsc: Check loop counts before use +- avcodec/mpegvideo_enc: Fix potential overflow in RD +- avcodec/mpeg4videodec: assert impossible wrap points +- avcodec/mpeg12dec: Use 64bit in bit computation +- avcodec/vble: Check av_image_get_buffer_size() for failure +- avcodec/vp3: Replace check by assert +- avcodec/jpeg2000dec: remove ST=3 case +- avcodec/qsvdec: Check av_image_get_buffer_size() for failure +- avcodec/exr: Fix preview overflow +- avcodec/fmvc: remove dead assignment +- avcodec/h264_slice: Remove dead sps check +- avcodec/lpc: copy levenson coeffs only when they have been computed +- avutil/tests/base64: Check with too short output array +- libavutil/base64: Try not to write over the array end +- avcodec/cbs_av1: Avoid shift overflow +- doc/examples/demux_decode: Simplify loop +- avcodec/mpegvideo_enc: Fix 1 line and one column images +- swscale/output: Fix integer overflow in yuv2rgba64_full_1_c_template() +- swscale/output: Fix integer overflow in yuv2rgba64_1_c_template +- avformat/mxfdec: Check body_offset +- avformat/kvag: Check sample_rate +- avcodec/ac3_parser: Check init_get_bits8() for failure +- avcodec/pngdec: Check last AVFrame before deref +- avcodec/hevcdec: Check ref frame +- doc/examples/vaapi_transcode: Simplify loop +- avfilter/vf_thumbnail_cuda: Set ret before checking it +- avfilter/signature_lookup: Dont copy uninitialized stuff around +- avfilter/signature_lookup: Fix 2 differences to the refernce SW +- lavc/vp9: reset segmentation fields when segmentation isn't enabled +- configure: enable ffnvcodec, nvenc, nvdec for FreeBSD +- avcodec/x86/vp3dsp_init: Set correct function pointer, fix crash +- avutil/ppc/cpu: Also use the machdep.altivec sysctl on NetBSD +- avutil/ppc/cpu: Use proper header for OpenBSD PPC CPU detection +- lavd/v4l2: Use proper field type for second parameter of ioctl() with BSD's +- configure: use pkg-config for sndio +- fate/subtitles: Ignore line endings for sub-scc test +- avformat/mxfdec: Check index_edit_rate +- swscale/utils: Fix xInc overflow +- avcodec/exr: Dont use 64bits to hold 6bits +- avcodec/exr: Check for remaining bits in huf_unpack_enc_table() +- avformat/mpegts: Reset local nb_prg on add_program() failure +- avformat/mxfdec: Make edit_unit_byte_count unsigned +- avformat/movenc: Check that cts fits in 32bit +- avformat/mxfdec: Check first case of offset_temp computation for overflow +- avfilter/vf_signature: Dont crash on no frames +- avformat/westwood_vqa: Fix 2g packets +- avformat/matroskadec: Check timescale +- avformat/wavdec: 
satuarte next_tag_ofs, data_end +- avformat/sbgdec: Check for negative duration +- avformat/rpl: Use 64bit for total_audio_size and check it +- avformat/timecode: use 64bit for intermediate for rounding in fps_from_frame_rate() +- avformat/jacosubdec: Use 64bit for abs +- avformat/concatdec: Check user_duration sum +- avcodec/truemotion1: Height not being a multiple of 4 is unsupported +- avcodec/hcadec: do not set hfr_group_count to invalid values +- avformat/concatdec: clip outpoint - inpoint overflow in get_best_effort_duration() +- avformat/jacosubdec: clarify code +- avformat/cafdec: Check that data chunk end fits within 64bit +- avformat/iff: Saturate avio_tell() + 12 +- avformat/dxa: Adjust order of operations around block align +- avformat/cafdec: dont seek beyond 64bit +- avformat/id3v2: read_uslt() check for the amount read +- avcodec/proresenc_kostya: Remove bug similarity text +- avcodec/vorbisdec: Check remaining data in vorbis_residue_decode_internal() +- libswscale/utils: Fix bayer to yuvj +- swscale/swscale: Check srcSliceH for bayer +- swscale/utils: Allocate more dithererror +- avcodec/indeo3: Round dimensions up in allocate_frame_buffers() +- avutil/rational: Document what is to be expected from av_d2q() of doubles representing rational numbers +- avfilter/signature_lookup: Do not dereference NULL pointers after malloc failure +- avfilter/signature_lookup: dont leave uncleared pointers in sll_free() +- avcodec/mpegvideo_enc: Use ptrdiff_t for stride +- libavformat/hlsenc.c: Populate OTI using AAC profile in write_codec_attr. +- avcodec/mpegvideo_enc: Dont copy beyond the image +- avfilter/vf_minterpolate: Check pts before division +- avformat/flacdec: Avoid double AVERRORS +- avfilter/vf_vidstabdetect: Avoid double AVERRORS +- avfilter/vf_swaprect: round coordinates down +- avfilter/vf_swaprect: Use height for vertical variables +- avfilter/vf_swaprect: assert that rectangles are within memory +- avfilter/af_alimiter: Check nextpos before use +- avfilter/af_stereowiden: Check length +- avfilter/vf_weave: Fix odd height handling +- avfilter/vf_gradfun: Do not overread last line +- avformat/mov: do not set sign bit for chunk_offsets +- avcodec/jpeglsdec: Check Jpeg-LS LSE +- configure: Enable section_data_rel_ro for FreeBSD and NetBSD aarch64 / arm +- avformat/mov: Check if a key is longer than the atom containing it +- avcodec/nvdec: reset bitstream_len/nb_slices when resetting bitstream pointer +- avformat/mov: don't abort on duplicate Mastering Display Metadata boxes +- avcodec/x86/mathops: clip constants used with shift instructions within inline assembly +- avcodec/av1dec: fix matrix coefficients exposed by codec context +- avcodec/nvdec: don't free NVDECContext->bitstream +- avcodec/av1dec: Fix resolving zero divisor +- avformat/mov: Ignore duplicate ftyp +- avformat/mov: Fix integer overflow in mov_read_packet(). 
+- seek: Fix crashes in ff_seek_frame_binary if built with latest Clang 14 +- avcodec/4xm: Check for cfrm exhaustion +- avformat/mov: Disallow FTYP after streams +- doc/html: fix styling issue with Texinfo 7.0 +- doc/html: support texinfo 7.0 +- doc/t2h.pm: fix missing TOC with texinfo 6.8 and above +- doc/t2h.pm: fix missing CSS with texinfo 6.8 and above +- avformat/matroskadec: Fix declaration-after-statement warnings +- avformat/rtsp: Use rtsp_st->stream_index +- avcodec/jpeg2000dec: Check image offset +- avformat/mxfdec: Check klv offset +- libavutil/ppc/cpu.c: check that AT_HWCAP2 is defined +- avcodec/h2645_parse: Avoid EAGAIN +- avcodec/xvididct: Make c* unsigned to avoid undefined overflows +- avformat/tmv: Check video chunk size +- avcodec/h264_parser: saturate dts a bit +- avformat/asfdec_f: Saturate presentation time in marker +- avformat/xwma: sanity check bits_per_coded_sample +- avformat/matroskadec: Check prebuffered_ns for overflow +- avformat/wavdec: Check left avio_tell for overflow +- avformat/tta: Better totalframes check +- avformat/rpl: Check for number_of_chunks overflow +- avformat/mov: compute absolute dts difference without overflow in mov_find_next_sample() +- avformat/jacosubdec: Check timeres +- avformat/jacosubdec: avoid signed integer overflows in get_shift() +- avformat/jacosubdec: Factorize code in get_shift() a bit +- avcodec/escape124: Do not return random numbers +- avcodec/apedec: Fix an integer overflow in predictor_update_filter() +- avformat/avs: Check if return code is representable +- avcodec/lcldec: Make PNG filter addressing match the code afterwards +- avformat/westwood_vqa: Check chunk size +- avformat/sbgdec: Check for period overflow +- avformat/concatdec: Check in/outpoint for overflow +- avcodec/xvididct: Fix integer overflow in idct_row() +- avcodec/celp_math: avoid overflow in shift +- tools/target_dec_fuzzer: Adjust threshold for rtv1 +- avformat/hls: reduce default max reload to 3 +- avformat/format: Stop reading data at EOF during probing +- avcodec/huffyuvdec: avoid undefined behavior with get_vlc2() failure +- avcodec/cscd: Fix "CamStudio Lossless Codec 1.0" gzip files +- avcodec/cscd: Check for CamStudio Lossless Codec 1.0 behavior in end check of LZO files +- avcodec/hevcdec: Fix undefined memcpy() +- avcodec/mpeg4videodec: more unsigned in amv computation +- avcodec/tta: fix signed overflow in decorrelate +- avcodec/apedec: Fix 48khz 24bit below insane level +- avcodec/apedec: Fix CRC for 24bps and bigendian +- avcodec/xvididct: Fix integer overflow in idct_row() +- avformat/avr: Check sample rate +- avcodec/jpeg2000dec: Check for reduction factor and image offset +- avutil/softfloat: Basic documentation for av_sincos_sf() +- avutil/softfloat: fix av_sincos_sf() +- avcodec/utils: fix 2 integer overflows in get_audio_frame_duration() +- avcodec/hevcdec: Avoid null pointer dereferences in MC +- avcodec/takdsp: Fix integer overflows +- avcodec: Ignoring errors is only possible before the input end +- avcodec/noise_bsf: Check for wrapped frames +- avformat/oggparsetheora: clip duration within 64bit +- avformat/wavdec: Check that smv block fits in available space +- avcodec/tiff: add a zero DNG_LINEARIZATION_TABLE check +- avcodec/tak: Check remaining bits in ff_tak_decode_frame_header() +- avcodec/sonic: Fix two undefined integer overflows +- avcodec/utils: the IFF_ILBM implementation assumes that there are a multiple of 16 allocated +- avcodec/exr: Cleanup befor return +- avcodec/pngdec: Do not pass AVFrame into global header decode 
+- avcodec/pngdec: remove AVFrame argument from decode_iccp_chunk() +- avcodec/vorbisdec: Check codebook float values to be finite +- avcodec/g2meet: Replace fake allocation avoidance for framebuf +- avcodec/lcldec: More space for rgb24 +- avcodec/lcldec: Support 4:1:1 and 4:2:2 with odd width +- libavcodec/lcldec: width and height should not be unsigned +- avcodec/escape124: Check that blocks are allocated before use +- avcodec/huffyuvdec: Fix undefined behavior with shift +- avcodec/j2kenc: Replace RGB24 special case by generic test +- avcodec/j2kenc: Replace BGR48 / GRAY16 test by test for number of bits +- avcodec/j2kenc: simplify pixel format setup +- avcodec/j2kenc: Fix funky bpno errors on decoding +- avcodec/j2kenc: remove misleading pred value +- avcodec/j2kenc: fix 5/3 DWT identifer +- avcodec/vp3: Check width to avoid assertion failure +- avcodec/g729postfilter: Limit shift in long term filter +- avcodec/vdpau_mpeg4: fix order of quant matrix coefficients +- avcodec/vdpau_mpeg12: fix order of quant matrix coefficients +- avcodec/nvdec_mpeg4: fix order of quant matrix coefficients +- avcodec/nvdec_mpeg2: fix order of quant matrix coefficients +- avcodec/libsvtav1: remove compressed_ten_bit_format and simplify alloc_buffer +- configure: account for openssl3 license change +- +- -version 4.4.4: -- avcodec/tests/snowenc: Fix 2nd test -- avcodec/tests/snowenc: return a failure if DWT/IDWT mismatches @@ -75,242 +377,38 @@ index 620ca2bf40..a6508cd8ac 100644 -- avcodec/mjpegenc: take into account component count when writing the SOF header size -- swscale: aarch64: Fix yuv2rgb with negative stride - --version 4.4.3: --- avformat/vividas: Check packet size --- configure: link to libatomic when it's present --- avcodec/dstdec: Check for overflow in build_filter() --- avformat/spdifdec: Use 64bit to compute bit rate --- avformat/rpl: Use 64bit for duration computation --- avformat/xwma: Use av_rescale() for duration computation --- avformat/sdsdec: Use av_rescale() to avoid intermediate overflow in duration calculation --- avformat/sbgdec: Check ts_int in genrate_intervals --- avformat/rmdec: check tag_size --- avformat/nutdec: Check fields --- avformat/flvdec: Use 64bit for sum_flv_tag_size --- avformat/jacosubdec: Fix overflow in get_shift() --- avformat/dxa: avoid bpc overflows --- avformat/cafdec: Check that nb_frasmes fits within 64bit --- avformat/asfdec_o: Limit packet offset --- avformat/ape: Check frames size --- avformat/icodec: Check nb_pal --- avformat/aiffdec: Use 64bit for block_duration use --- avformat/aiffdec: Check block_duration --- avformat/mxfdec: only probe max run in --- avformat/mxfdec: Check run_in is within 65536 --- avcodec/mjpegdec: Check for unsupported bayer case --- avcodec/apedec: Fix integer overflow in filter_3800() --- avcodec/tta: Check 24bit scaling for overflow --- avcodec/mobiclip: Check quantizer for overflow --- avcodec/exr: Check preview psize --- avcodec/tiff: Fix loop detection --- libavformat/hls: Free keys --- avcodec/fmvc: Move frame allocation to a later stage --- avfilter/vf_showinfo: remove backspaces --- avcodec/speedhq: Check width --- avcodec/bink: disallow odd positioned scaled blocks --- avformat/asfdec_o: limit recursion depth in asf_read_unknown() --- doc/git-howto.texi: Document commit signing --- libavcodec/8bps: Check that line lengths fit within the buffer --- avcodec/midivid: Perform lzss_uncompress() before ff_reget_buffer() --- libavformat/iff: Check for overflow in body_end calculation --- avformat/avidec: Prevent entity 
expansion attacks --- avcodec/h263dec: Sanity check against minimal I/P frame size --- avcodec/hevcdec: Check s->ref in the md5 path similar to hwaccel --- avcodec/mpegaudiodec_template: use unsigned shift in handle_crc() --- avformat/subviewerdec: Make read_ts() more flexible --- avcodec/mjpegdec: bayer and rct are incompatible --- MAINTAINERS: Add ED25519 key for signing my commits in the future --- avcodec/hevc_filter: copy_CTB() only within width&height --- avcodec/tiff: Check tile_length and tile_width --- avcodec/mss4: Check image size with av_image_check_size2() --- avformat/flvdec: Check for EOF in index reading --- avformat/nutdec: Check get_packetheader() in mainheader --- avformat/asfdec_f: Use 64bit for packet start time --- avcodec/exr: Check x/ysize --- tools/target_dec_fuzzer: Adjust threshold for MMVIDEO --- avcodec/lagarith: Check dst/src in zero run code --- avcodec/h264dec: Skip late SEI --- avcodec/sbrdsp_fixed: Fix integer overflows in sbr_qmf_deint_neg_c() --- avfilter/vf_signature: Fix integer overflow in filter_frame() --- avformat/rtsp: break on unknown protocols --- avcodec/hevcdsp_template: stay within tables in sao_band_filter() --- avcodec/tiff: Check pixel format types for dng --- avcodec/qpeldsp: copy less for the mc0x cases --- avformat/aaxdec: Check for empty segments --- avcodec/ffv1dec: Limit golomb rice coded slices to width 8M --- avformat/iff: simplify duration calculation --- avcodec/wnv1: Check for width =1 --- avcodec/ffv1dec_template: fix indention --- avformat/sctp: close socket on errors --- avcodec/aasc: Fix indention --- avcodec/qdrw: adjust max colors to array size --- avcodec/alacdsp: Make intermediates unsigned --- avformat/aiffdec: cleanup size handling for extreem cases --- avformat/matroskadec: avoid integer overflows in SAR computation --- avcodec/jpeglsdec: fix end check for xfrm --- avcodec/cdgraphics: limit scrolling to the line --- avformat/hls: Limit start_seq_no to one bit less --- avformat/aiffdec: avoid integer overflow in get_meta() --- avformat/ape: more bits in size for less overflows --- avformat/aviobuf: Check buf_size in ffio_ensure_seekback() --- avformat/bfi: Check offsets better --- avformat/asfdec_f: Check packet_frag_timestamp --- avcodec/texturedspenc: Fix indexing in color distribution determination --- avformat/act: Check ff_get_wav_header() for failure --- avcodec/libxavs2: Improve r redundancy in occured --- avformat/libzmq: Improve r redundancy in occured --- avfilter/vsrc_mandelbrot: Check for malloc failure --- avfilter/vf_frei0r: Copy to frame allocated according to frei0r requirements --- avfilter/video: Add ff_default_get_video_buffer2() to set specific alignment --- avformat/genh: Check sample rate --- configure: bump year --- lavc/videotoolbox: do not pass AVCodecContext to decoder output callback --- lavc/pthread_frame: always transfer stashed hwaccel state --- avcodec/arm/sbcenc: avoid callee preserved vfp registers --- avfilter/vf_scale: overwrite the width and height expressions with the original values --- lavc/pthread_frame: avoid leaving stale hwaccel state in worker threads --- configure: extend SDL check to accept all 2.x versions --- lavf/tls_mbedtls: add support for mbedtls version 3 -- --version 4.4.2: --- fate: update reference files after the recent dash manifest muxer changes --- avformat/webmdashenc: fix on-demand profile string --- Update for FFmpeg 4.4.2 --- avcodec/exr: Avoid signed overflow in displayWindow --- avcodec/diracdec: avoid signed integer overflow in global mv --- 
avcodec/takdsp: Fix integer overflow in decorrelate_sf() --- avcodec/apedec: fix a integer overflow in long_filter_high_3800() --- avfilter/vf_subtitles: pass storage size to libass --- avformat/aqtitledec: Skip unrepresentable durations --- avformat/cafdec: Do not store empty keys in read_info_chunk() --- avformat/mxfdec: Do not clear array in mxf_read_strong_ref_array() before writing --- avformat/mxfdec: Check for avio_read() failure in mxf_read_strong_ref_array() --- avformat/mxfdec: Check count in mxf_read_strong_ref_array() --- avformat/hls: Check target_duration --- avcodec/pixlet: Avoid signed integer overflow in scaling in filterfn() --- avformat/matroskadec: Check pre_ns --- avcodec/sonic: Use unsigned for predictor_k to avoid undefined behavior --- avcodec/libuavs3d: Check ff_set_dimensions() for failure --- avcodec/mjpegbdec: Set buf_size --- avformat/matroskadec: Use rounded down duration in get_cue_desc() check --- avcodec/argo: Check packet size --- avcodec/g729_parser: Check channels --- avformat/avidec: Check height --- avformat/rmdec: Better duplicate tags check --- avformat/mov: Disallow empty sidx --- avformat/argo_asf: Fix order of operations in error check in argo_asf_write_trailer() --- avformat/matroskadec: Check duration --- avformat/mov: Corner case encryption error cleanup in mov_read_senc() --- avcodec/jpeglsdec: Fix if( code style --- avcodec/jpeglsdec: Check get_ur_golomb_jpegls() for error --- avcodec/motion_est: fix indention of ff_get_best_fcode() --- avcodec/motion_est: Fix xy indexing on range violation in ff_get_best_fcode() --- avformat/hls: Use unsigned for iv computation --- avcodec/jpeglsdec: Increase range for N in ls_get_code_runterm() by using unsigned --- avformat/matroskadec: Check desc_bytes --- avformat/utils: Fix invalid NULL pointer operation in ff_parse_key_value() --- avformat/matroskadec: Fix infinite loop with bz decompression --- avformat/mov: Check size before subtraction --- avcodec/cfhd: Avoid signed integer overflow in coeff --- avcodec/apedec: Fix integer overflows in predictor_update_3930() --- avcodec/apedec: fix integer overflow in 8bit samples --- avformat/flvdec: timestamps cannot use the full int64 range --- avcodec/tiff: Remove messing with jpeg context --- avcodec/tiff: Use ff_set_dimensions() for setting up mjpeg context dimensions --- avcodec/tiff: Pass max_pixels to mjpeg context --- avcodec/vqavideo: reset accounting on error --- avcodec/alacdsp: fix integer overflow in decorrelate_stereo() --- avformat/4xm: Check for duplicate track ids --- avformat/4xm: Consider max_streams on reallocating tracks array --- avformat/mov: Check next offset in mov_read_dref() --- avformat/vivo: Favor setting fps from explicit fractions --- avformat/vivo: Do not use the general expression evaluator for parsing a floating point value --- avformat/mxfdec: Check for duplicate mxf_read_index_entry_array() --- avcodec/apedec: Change avg to uint32_t --- avformat/mxfdec: Check component_depth in mxf_get_color_range() --- avformat/mov: Disallow duplicate smdm --- avformat/mov: Check for EOF in mov_read_glbl() --- avcodec/vp3: Check version in all cases when VP4 code is not built --- avformat/mov: Check channels for mov_parse_stsd_audio() --- avformat/avidec: Check read_odml_index() for failure --- avformat/aiffdec: Use av_rescale() for bitrate --- avformat/aiffdec: sanity check block_align --- avformat/aiffdec: Check sample_rate --- avcodec/libdav1d: free the Dav1dData packet on dav1d_send_data() failure --- avcodec/zmbvenc: Fix memleak upon init 
error --- avcodec/dnxhdenc: Fix segfault when using too many slice threads --- avcodec/wma(dec|enc): Fix memleaks upon allocation error --- avfilter/avfilter: Actually error out on init error --- avcodec/opus_silk: Remove wrong size information in function declaration --- avformat/omadec: Don't output uninitialized values --- avformat/jacosubenc: Fix writing extradata --- avformat/cafenc: Fix memleak when trailer is never written --- avformat/cafenc: Don't segfault upon allocation error --- avformat/cafenc: Fix potential integer overflow --- avformat/movenc: Limit ism_lookahead to a sane value --- avutil/utils: Remove racy check from avutil_version() --- avformat/sccdec: Don't use uninitialized data, fix crash, simplify logic --- avformat/subtitles: Honour ff_subtitles_read_line() documentation --- avformat/tee: Fix leak of FIFO-options dictionary --- avformat/tee: Fix leak of strings --- avcodec/rasc: Fix potential use of uninitialized value --- avfilter/vf_w3fdif: Fix segfault on allocation error --- avfilter/af_surround: Fix memleaks upon allocation error --- avfilter/af_vibrato: Fix segfault upon allocation error --- avfilter/aeval: Fix leak of expressions upon reallocation error --- avdevice/xv: Increase array size --- avfilter/asrc_flite: Fix use-after-frees --- avfilter/asrc_flite: Don't segfault when using list_voices option --- Revert "avfilter/vf_idet: reduce noisyness if the filter has been auto inserted" --- avformat/matroskadec: Don't unnecessarily reduce aspect ratio --- avcodec/h263: Fix global-buffer-overflow with noout flag2 set --- avcodec/vaapi_encode: Fix segfault upon closing uninitialized encoder --- avcodec/movtextenc: Fix infinite loop due to variable truncation --- avcodec/libopenh264dec: Increase array sizes, fix stack-buffer overread --- avcodec/libkvazaar: Increase array size --- avformat/aadec: Don't use the same loop counter in inner and outer loop --- avformat/moflex: Don't use uninitialized timebase for data stream --- lavf/udp: do not return an uninitialized value from udp_open() --- avcodec/nvenc: zero-initialize NV_ENC_REGISTER_RESOURCE struct --- configure: Add missing libshine->mpegaudioheader dependency --- avcodec/Makefile: Add missing entry for ADPCM_IMA_AMV_ENCODER --- avcodec/Makefile: Only compile nvenc.o if needed --- avcodec/av1_vaapi: improve decode quality --- avcodec/av1_vaapi: enable segmentation features --- avcodec/av1_vaapi: setting 2 output surface for film grain --- avcodec/vaapi: increase av1 decode pool size --- avcodec/dxva2_av1: fix global motion params --- avcodec/av1_vaapi: add gm params valid check --- avcodec/av1dec: support setup shear process --- avcodec/av1: extend some definitions in spec section 3 --- cbs_av1: fix incorrect data type --- avcodec/libdav1d: let libdav1d choose optimal max frame delay --- avcodec/libdav1d: pass auto threads value to libdav1d -- - version 4.4.1: - - avcodec/flac_parser: Consider AV_INPUT_BUFFER_PADDING_SIZE - - avcodec/ttadsp: Fix integer overflows in tta_filter_process_c() + version 4.4.3: + - avformat/vividas: Check packet size + - configure: link to libatomic when it's present diff --git a/MAINTAINERS b/MAINTAINERS -index b825b8d68e..3b6cfad4fc 100644 +index d4e940ba9f..b825b8d68e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS -@@ -615,7 +615,6 @@ Jean Delvare 7CA6 9F44 60F1 BDC4 1FD2 C858 A552 6B9B B3CD 4E6A - Loren Merritt ABD9 08F4 C920 3F65 D8BE 35D7 1540 DAA7 060F 56DE - Lynne FE50 139C 6805 72CA FD52 1F8D A2FE A5F0 3F03 4464 - Michael Niedermayer 9FF2 128B 147E F673 0BAD F133 611E C787 
040B 0FAB -- DD1E C9E8 DE08 5C62 9B3E 1846 B18E 8928 B394 8D64 - Nicolas George 24CE 01CE 9ACC 5CEB 74D8 8D9D B063 D997 36E5 4C93 - Nikolay Aleksandrov 8978 1D8C FB71 588E 4B27 EAA8 C4F0 B5FC E011 13B1 - Panagiotis Issaris 6571 13A3 33D9 3726 F728 AA98 F643 B12E ECF3 E029 +@@ -583,12 +583,10 @@ wm4 + Releases + ======== + +-7.0 Michael Niedermayer +-6.1 Michael Niedermayer +-5.1 Michael Niedermayer +-4.4 Michael Niedermayer +-3.4 Michael Niedermayer + 2.8 Michael Niedermayer ++2.7 Michael Niedermayer ++2.6 Michael Niedermayer ++2.5 Michael Niedermayer + + If you want to maintain an older release, please contact us + diff --git a/RELEASE b/RELEASE -index cbe06cdbfc..cca25a93cd 100644 +index fa1ba0458a..9e3a93350d 100644 --- a/RELEASE +++ b/RELEASE @@ -1 +1 @@ --4.4.4 -+4.4.1 +-4.4.5 ++4.4.3 diff --git a/configure b/configure -index fb55e04ee7..f2fc33e89b 100755 +index 0ed5de8409..c69ed44178 100755 --- a/configure +++ b/configure @@ -207,6 +207,7 @@ External library support: @@ -357,6 +455,22 @@ index fb55e04ee7..f2fc33e89b 100755 libxcb libxcb_shm libxcb_shape +@@ -1735,6 +1744,7 @@ EXTERNAL_LIBRARY_GPL_LIST=" + EXTERNAL_LIBRARY_NONFREE_LIST=" + decklink + libfdk_aac ++ openssl + libtls + " + +@@ -1826,7 +1836,6 @@ EXTERNAL_LIBRARY_LIST=" + mediacodec + openal + opengl +- openssl + pocketsphinx + vapoursynth + " @@ -1868,7 +1877,10 @@ HWACCEL_LIBRARY_LIST=" mmal omx @@ -436,15 +550,6 @@ index fb55e04ee7..f2fc33e89b 100755 hevc_vaapi_hwaccel_deps="vaapi VAPictureParameterBufferHEVC" hevc_vaapi_hwaccel_select="hevc_decoder" hevc_vdpau_hwaccel_deps="vdpau VdpPictureInfoHEVC" -@@ -3268,7 +3297,7 @@ librav1e_encoder_deps="librav1e" - librav1e_encoder_select="extract_extradata_bsf" - librsvg_decoder_deps="librsvg" - libshine_encoder_deps="libshine" --libshine_encoder_select="audio_frame_queue mpegaudioheader" -+libshine_encoder_select="audio_frame_queue" - libspeex_decoder_deps="libspeex" - libspeex_encoder_deps="libspeex" - libspeex_encoder_select="audio_frame_queue" @@ -3438,8 +3467,13 @@ sndio_indev_deps="sndio" sndio_outdev_deps="sndio" v4l2_indev_deps_any="linux_videodev2_h sys_videoio_h" @@ -467,39 +572,23 @@ index fb55e04ee7..f2fc33e89b 100755 unsharp_opencl_filter_deps="opencl" uspp_filter_deps="gpl avcodec" vaguedenoiser_filter_deps="gpl" -@@ -3706,23 +3741,23 @@ cws2fws_extralibs="zlib_extralibs" - - # libraries, in any order - avcodec_deps="avutil" --avcodec_suggest="libm stdatomic" -+avcodec_suggest="libm" - avcodec_select="null_bsf" - avdevice_deps="avformat avcodec avutil" --avdevice_suggest="libm stdatomic" -+avdevice_suggest="libm" - avfilter_deps="avutil" --avfilter_suggest="libm stdatomic" -+avfilter_suggest="libm" - avformat_deps="avcodec avutil" --avformat_suggest="libm network zlib stdatomic" -+avformat_suggest="libm network zlib" - avresample_deps="avutil" - avresample_suggest="libm" --avutil_suggest="clock_gettime ffnvcodec libm libdrm libmfx opencl user32 vaapi vulkan videotoolbox corefoundation corevideo coremedia bcrypt stdatomic" -+avutil_suggest="clock_gettime ffnvcodec libm libdrm libmfx opencl user32 vaapi vulkan videotoolbox corefoundation corevideo coremedia bcrypt" - postproc_deps="avutil gpl" --postproc_suggest="libm stdatomic" -+postproc_suggest="libm" - swresample_deps="avutil" --swresample_suggest="libm libsoxr stdatomic" -+swresample_suggest="libm libsoxr" - swscale_deps="avutil" --swscale_suggest="libm stdatomic" -+swscale_suggest="libm" - - avcodec_extralibs="pthreads_extralibs iconv_extralibs dxva2_extralibs" - avfilter_extralibs="pthreads_extralibs" -@@ 
-6155,6 +6190,12 @@ check_func_headers glob.h glob +@@ -5369,7 +5404,6 @@ case $target_os in + ;; + netbsd) + disable symver +- enable section_data_rel_ro + oss_indev_extralibs="-lossaudio" + oss_outdev_extralibs="-lossaudio" + enabled gcc || check_ldflags -Wl,-zmuldefs +@@ -5388,7 +5422,6 @@ case $target_os in + disable symver + ;; + freebsd) +- enable section_data_rel_ro + ;; + bsd/os) + add_extralibs -lpoll -lgnugetopt +@@ -6157,6 +6190,12 @@ check_func_headers glob.h glob enabled xlib && check_lib xlib "X11/Xlib.h X11/extensions/Xvlib.h" XvGetPortAttribute -lXv -lX11 -lXext @@ -512,23 +601,7 @@ index fb55e04ee7..f2fc33e89b 100755 check_headers direct.h check_headers dirent.h check_headers dxgidebug.h -@@ -6186,14 +6227,7 @@ check_headers asm/types.h - # it seems there are versions of clang in some distros that try to use the - # gcc headers, which explodes for stdatomic - # so we also check that atomics actually work here --# --# some configurations also require linking to libatomic, so try --# both with -latomic and without --for LATOMIC in "-latomic" ""; do -- check_builtin stdatomic stdatomic.h \ -- "atomic_int foo, bar = ATOMIC_VAR_INIT(-1); atomic_store(&foo, 0); foo += bar" \ -- $LATOMIC && eval stdatomic_extralibs="\$LATOMIC" && break --done -+check_builtin stdatomic stdatomic.h "atomic_int foo, bar = ATOMIC_VAR_INIT(-1); atomic_store(&foo, 0); foo += bar" - - check_lib advapi32 "windows.h" RegCloseKey -ladvapi32 - check_lib bcrypt "windows.h bcrypt.h" BCryptGenRandom -lbcrypt && -@@ -6499,11 +6533,12 @@ enabled mbedtls && { check_pkg_config mbedtls mbedtls mbedtls/x509_crt +@@ -6501,11 +6540,12 @@ enabled mbedtls && { check_pkg_config mbedtls mbedtls mbedtls/x509_crt check_lib mbedtls mbedtls/ssl.h mbedtls_ssl_init -lmbedtls -lmbedx509 -lmbedcrypto || die "ERROR: mbedTLS not found"; } enabled mediacodec && { enabled jni || die "ERROR: mediacodec requires --enable-jni"; } @@ -543,7 +616,19 @@ index fb55e04ee7..f2fc33e89b 100755 die "ERROR: mmal not found" && check_func_headers interface/mmal/mmal.h "MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS"; } enabled openal && { { for al_extralibs in "${OPENAL_LIBS}" "-lopenal" "-lOpenAL32"; do -@@ -6544,8 +6579,16 @@ enabled rkmpp && { require_pkg_config rkmpp rockchip_mpp rockchip/r +@@ -6533,10 +6573,7 @@ enabled omx_rpi && { test_code cc OMX_Core.h OMX_IndexConfigBrcmVideoR + die "ERROR: OpenMAX IL headers from raspberrypi/firmware not found"; } && + enable omx + enabled omx && require_headers OMX_Core.h +-enabled openssl && { { check_pkg_config openssl "openssl >= 3.0.0" openssl/ssl.h OPENSSL_init_ssl && +- { enabled gplv3 || ! enabled gpl || enabled nonfree || die "ERROR: OpenSSL >=3.0.0 requires --enable-version3"; }; } || +- { enabled gpl && ! 
enabled nonfree && die "ERROR: OpenSSL <3.0.0 is incompatible with the gpl"; } || +- check_pkg_config openssl openssl openssl/ssl.h OPENSSL_init_ssl || ++enabled openssl && { check_pkg_config openssl openssl openssl/ssl.h OPENSSL_init_ssl || + check_pkg_config openssl openssl openssl/ssl.h SSL_library_init || + check_lib openssl openssl/ssl.h OPENSSL_init_ssl -lssl -lcrypto || + check_lib openssl openssl/ssl.h SSL_library_init -lssl -lcrypto || +@@ -6549,8 +6586,16 @@ enabled rkmpp && { require_pkg_config rkmpp rockchip_mpp rockchip/r { enabled libdrm || die "ERROR: rkmpp requires --enable-libdrm"; } } @@ -560,16 +645,7 @@ index fb55e04ee7..f2fc33e89b 100755 if enabled gcrypt; then GCRYPT_CONFIG="${cross_prefix}libgcrypt-config" -@@ -6562,7 +6605,7 @@ fi - - if enabled sdl2; then - SDL2_CONFIG="${cross_prefix}sdl2-config" -- test_pkg_config sdl2 "sdl2 >= 2.0.1 sdl2 < 3.0.0" SDL_events.h SDL_PollEvent -+ test_pkg_config sdl2 "sdl2 >= 2.0.1 sdl2 < 2.1.0" SDL_events.h SDL_PollEvent - if disabled sdl2 && "${SDL2_CONFIG}" --version > /dev/null 2>&1; then - sdl2_cflags=$("${SDL2_CONFIG}" --cflags) - sdl2_extralibs=$("${SDL2_CONFIG}" --libs) -@@ -6625,6 +6668,10 @@ if enabled v4l2_m2m; then +@@ -6630,6 +6675,10 @@ if enabled v4l2_m2m; then check_cc vp9_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VP9;" fi @@ -580,7 +656,26 @@ index fb55e04ee7..f2fc33e89b 100755 check_headers sys/videoio.h test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete -@@ -7112,6 +7159,9 @@ check_deps $CONFIG_LIST \ +@@ -6662,8 +6711,7 @@ enabled alsa && { check_pkg_config alsa alsa "alsa/asoundlib.h" snd_pcm_htimesta + enabled libjack && + require_pkg_config libjack jack jack/jack.h jack_port_get_latency_range + +-enabled sndio && { check_pkg_config sndio sndio "sndio.h" sio_open || +- check_lib sndio sndio.h sio_open -lsndio; } ++enabled sndio && check_lib sndio sndio.h sio_open -lsndio + + if enabled libcdio; then + check_pkg_config libcdio libcdio_paranoia "cdio/cdda.h cdio/paranoia.h" cdio_cddap_open || +@@ -6764,7 +6812,7 @@ enabled vulkan && + + if enabled x86; then + case $target_os in +- freebsd|mingw32*|mingw64*|win32|win64|linux|cygwin*) ++ mingw32*|mingw64*|win32|win64|linux|cygwin*) + ;; + *) + disable ffnvcodec cuvid nvdec nvenc +@@ -7118,6 +7166,9 @@ check_deps $CONFIG_LIST \ enabled threads && ! enabled pthreads && ! 
enabled atomics_native && die "non pthread threading without atomics not supported, try adding --enable-pthreads or --cpu=i486 or higher if you are on x86" enabled avresample && warn "Building with deprecated library libavresample" @@ -590,25 +685,25 @@ index fb55e04ee7..f2fc33e89b 100755 case $target_os in haiku) disable memalign -@@ -7591,7 +7641,7 @@ cat > $TMPH < $TMPH <.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{c
ontent:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"
\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphicon-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:before,:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive,.thumbnail>img,.thumbnail 
a>img,.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;width:100% \9;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;width:100% \9;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 small,.h3 small,.h4 small,.h5 small,.h6 small,h1 .small,h2 .small,h3 .small,h4 .small,h5 .small,h6 .small,.h1 .small,.h2 .small,.h3 .small,.h4 .small,.h5 .small,.h6 .small{font-weight:400;line-height:1;color:#777}h1,.h1,h2,.h2,h3,.h3{margin-top:20px;margin-bottom:10px}h1 small,.h1 small,h2 small,.h2 small,h3 small,.h3 small,h1 .small,.h1 .small,h2 .small,.h2 .small,h3 .small,.h3 .small{font-size:65%}h4,.h4,h5,.h5,h6,.h6{margin-top:10px;margin-bottom:10px}h4 small,.h4 small,h5 small,.h5 small,h6 small,.h6 small,h4 .small,.h4 .small,h5 .small,.h5 .small,h6 .small,.h6 .small{font-size:75%}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}small,.small{font-size:85%}cite{font-style:normal}mark,.mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#428bca}a.text-primary:hover{color:#3071a9}.text-success{color:#3c763d}a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#428bca}a.bg-primary:hover{background-color:#3071a9}.bg-success{background-color:#dff0d8}a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dt,dd{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal 
dd{margin-left:180px}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote p:last-child,blockquote ul:last-child,blockquote ol:last-child{margin-bottom:0}blockquote footer,blockquote small,blockquote .small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote footer:before,blockquote small:before,blockquote .small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse footer:before,blockquote.pull-right footer:before,.blockquote-reverse small:before,blockquote.pull-right small:before,.blockquote-reverse .small:before,blockquote.pull-right .small:before{content:''}.blockquote-reverse footer:after,blockquote.pull-right footer:after,.blockquote-reverse small:after,blockquote.pull-right small:after,.blockquote-reverse .small:after,blockquote.pull-right .small:after{content:'\00A0 \2014'}blockquote:before,blockquote:after{content:""}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media 
(min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-xs-1,.col-sm-1,.col-md-1,.col-lg-1,.col-xs-2,.col-sm-2,.col-md-2,.col-lg-2,.col-xs-3,.col-sm-3,.col-md-3,.col-lg-3,.col-xs-4,.col-sm-4,.col-md-4,.col-lg-4,.col-xs-5,.col-sm-5,.col-md-5,.col-lg-5,.col-xs-6,.col-sm-6,.col-md-6,.col-lg-6,.col-xs-7,.col-sm-7,.col-md-7,.col-lg-7,.col-xs-8,.col-sm-8,.col-md-8,.col-lg-8,.col-xs-9,.col-sm-9,.col-md-9,.col-lg-9,.col-xs-10,.col-sm-10,.col-md-10,.col-lg-10,.col-xs-11,.col-sm-11,.col-md-11,.col-lg-11,.col-xs-12,.col-sm-12,.col-md-12,.col-lg-12{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-xs-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media 
(min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media (min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media 
(min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transparent}th{text-align:left}.table{width:100%;max-width:100%;margin-bottom:20px}.table>thead>tr>th,.table>tbody>tr>th,.table>tfoot>tr>th,.table>thead>tr>td,.table>tbody>tr>td,.table>tfoot>tr>td{padding:8px;line-height:1.42857143;vertical-align:top;border-top:1px solid #ddd}.table>thead>tr>th{vertical-align:bottom;border-bottom:2px solid #ddd}.table>caption+thead>tr:first-child>th,.table>colgroup+thead>tr:first-child>th,.table>thead:first-child>tr:first-child>th,.table>caption+thead>tr:first-child>td,.table>colgroup+thead>tr:first-child>td,.table>thead:first-child>tr:first-child>td{border-top:0}.table>tbody+tbody{border-top:2px solid #ddd}.table-condensed>thead>tr>th,.table-condensed>tbody>tr>th,.table-condensed>tfoot>tr>th,.table-condensed>thead>tr>td,.table-condensed>tbody>tr>td,.table-condensed>tfoot>tr>td{padding:5px}.table-bordered{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>tbody>tr>th,.table-bordered>tfoot>tr>th,.table-bordered>thead>tr>td,.table-bordered>tbody>tr>td,.table-bordered>tfoot>tr>td{border:1px solid #ddd}.table-bordered>thead>tr>th,.table-bordered>thead>tr>td{border-bottom-width:2px}.table-striped>tbody>tr:nth-child(odd)>td,.table-striped>tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover>tbody>tr:hover>td,.table-hover>tbody>tr:hover>th{background-color:#f5f5f5}table col[class*=col-]{position:static;display:table-column;float:none}table td[class*=col-],table 
th[class*=col-]{position:static;display:table-cell;float:none}.table>thead>tr>td.active,.table>tbody>tr>td.active,.table>tfoot>tr>td.active,.table>thead>tr>th.active,.table>tbody>tr>th.active,.table>tfoot>tr>th.active,.table>thead>tr.active>td,.table>tbody>tr.active>td,.table>tfoot>tr.active>td,.table>thead>tr.active>th,.table>tbody>tr.active>th,.table>tfoot>tr.active>th{background-color:#f5f5f5}.table-hover>tbody>tr>td.active:hover,.table-hover>tbody>tr>th.active:hover,.table-hover>tbody>tr.active:hover>td,.table-hover>tbody>tr:hover>.active,.table-hover>tbody>tr.active:hover>th{background-color:#e8e8e8}.table>thead>tr>td.success,.table>tbody>tr>td.success,.table>tfoot>tr>td.success,.table>thead>tr>th.success,.table>tbody>tr>th.success,.table>tfoot>tr>th.success,.table>thead>tr.success>td,.table>tbody>tr.success>td,.table>tfoot>tr.success>td,.table>thead>tr.success>th,.table>tbody>tr.success>th,.table>tfoot>tr.success>th{background-color:#dff0d8}.table-hover>tbody>tr>td.success:hover,.table-hover>tbody>tr>th.success:hover,.table-hover>tbody>tr.success:hover>td,.table-hover>tbody>tr:hover>.success,.table-hover>tbody>tr.success:hover>th{background-color:#d0e9c6}.table>thead>tr>td.info,.table>tbody>tr>td.info,.table>tfoot>tr>td.info,.table>thead>tr>th.info,.table>tbody>tr>th.info,.table>tfoot>tr>th.info,.table>thead>tr.info>td,.table>tbody>tr.info>td,.table>tfoot>tr.info>td,.table>thead>tr.info>th,.table>tbody>tr.info>th,.table>tfoot>tr.info>th{background-color:#d9edf7}.table-hover>tbody>tr>td.info:hover,.table-hover>tbody>tr>th.info:hover,.table-hover>tbody>tr.info:hover>td,.table-hover>tbody>tr:hover>.info,.table-hover>tbody>tr.info:hover>th{background-color:#c4e3f3}.table>thead>tr>td.warning,.table>tbody>tr>td.warning,.table>tfoot>tr>td.warning,.table>thead>tr>th.warning,.table>tbody>tr>th.warning,.table>tfoot>tr>th.warning,.table>thead>tr.warning>td,.table>tbody>tr.warning>td,.table>tfoot>tr.warning>td,.table>thead>tr.warning>th,.table>tbody>tr.warning>th,.table>tfoot>tr.warning>th{background-color:#fcf8e3}.table-hover>tbody>tr>td.warning:hover,.table-hover>tbody>tr>th.warning:hover,.table-hover>tbody>tr.warning:hover>td,.table-hover>tbody>tr:hover>.warning,.table-hover>tbody>tr.warning:hover>th{background-color:#faf2cc}.table>thead>tr>td.danger,.table>tbody>tr>td.danger,.table>tfoot>tr>td.danger,.table>thead>tr>th.danger,.table>tbody>tr>th.danger,.table>tfoot>tr>th.danger,.table>thead>tr.danger>td,.table>tbody>tr.danger>td,.table>tfoot>tr.danger>td,.table>thead>tr.danger>th,.table>tbody>tr.danger>th,.table>tfoot>tr.danger>th{background-color:#f2dede}.table-hover>tbody>tr>td.danger:hover,.table-hover>tbody>tr>th.danger:hover,.table-hover>tbody>tr.danger:hover>td,.table-hover>tbody>tr:hover>.danger,.table-hover>tbody>tr.danger:hover>th{background-color:#ebcccc}@media screen and (max-width:767px){.table-responsive{width:100%;margin-bottom:15px;overflow-x:auto;overflow-y:hidden;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar;border:1px solid 
#ddd}.table-responsive>.table{margin-bottom:0}.table-responsive>.table>thead>tr>th,.table-responsive>.table>tbody>tr>th,.table-responsive>.table>tfoot>tr>th,.table-responsive>.table>thead>tr>td,.table-responsive>.table>tbody>tr>td,.table-responsive>.table>tfoot>tr>td{white-space:nowrap}.table-responsive>.table-bordered{border:0}.table-responsive>.table-bordered>thead>tr>th:first-child,.table-responsive>.table-bordered>tbody>tr>th:first-child,.table-responsive>.table-bordered>tfoot>tr>th:first-child,.table-responsive>.table-bordered>thead>tr>td:first-child,.table-responsive>.table-bordered>tbody>tr>td:first-child,.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.table-responsive>.table-bordered>thead>tr>th:last-child,.table-responsive>.table-bordered>tbody>tr>th:last-child,.table-responsive>.table-bordered>tfoot>tr>th:last-child,.table-responsive>.table-bordered>thead>tr>td:last-child,.table-responsive>.table-bordered>tbody>tr>td:last-child,.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.table-responsive>.table-bordered>tbody>tr:last-child>th,.table-responsive>.table-bordered>tfoot>tr:last-child>th,.table-responsive>.table-bordered>tbody>tr:last-child>td,.table-responsive>.table-bordered>tfoot>tr:last-child>td{border-bottom:0}}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:inherit;color:#333;border:0;border-bottom:1px solid #e5e5e5}label{display:inline-block;max-width:100%;margin-bottom:5px;font-weight:700}input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=radio],input[type=checkbox]{margin:4px 0 0;margin-top:1px \9;line-height:normal}input[type=file]{display:block}input[type=range]{display:block;width:100%}select[multiple],select[size]{height:auto}input[type=file]:focus,input[type=radio]:focus,input[type=checkbox]:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}output{display:block;padding-top:7px;font-size:14px;line-height:1.42857143;color:#555}.form-control{display:block;width:100%;height:34px;padding:6px 12px;font-size:14px;line-height:1.42857143;color:#555;background-color:#fff;background-image:none;border:1px solid #ccc;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075);-webkit-transition:border-color ease-in-out .15s,-webkit-box-shadow ease-in-out .15s;-o-transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s;transition:border-color ease-in-out .15s,box-shadow ease-in-out .15s}.form-control:focus{border-color:#66afe9;outline:0;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6);box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 8px rgba(102,175,233,.6)}.form-control::-moz-placeholder{color:#777;opacity:1}.form-control:-ms-input-placeholder{color:#777}.form-control::-webkit-input-placeholder{color:#777}.form-control[disabled],.form-control[readonly],fieldset[disabled] .form-control{cursor:not-allowed;background-color:#eee;opacity:1}textarea.form-control{height:auto}input[type=search]{-webkit-appearance:none}input[type=date],input[type=time],input[type=datetime-local],input[type=month]{line-height:34px;line-height:1.42857143 
\0}input[type=date].input-sm,input[type=time].input-sm,input[type=datetime-local].input-sm,input[type=month].input-sm{line-height:30px}input[type=date].input-lg,input[type=time].input-lg,input[type=datetime-local].input-lg,input[type=month].input-lg{line-height:46px}.form-group{margin-bottom:15px}.radio,.checkbox{position:relative;display:block;min-height:20px;margin-top:10px;margin-bottom:10px}.radio label,.checkbox label{padding-left:20px;margin-bottom:0;font-weight:400;cursor:pointer}.radio input[type=radio],.radio-inline input[type=radio],.checkbox input[type=checkbox],.checkbox-inline input[type=checkbox]{position:absolute;margin-top:4px \9;margin-left:-20px}.radio+.radio,.checkbox+.checkbox{margin-top:-5px}.radio-inline,.checkbox-inline{display:inline-block;padding-left:20px;margin-bottom:0;font-weight:400;vertical-align:middle;cursor:pointer}.radio-inline+.radio-inline,.checkbox-inline+.checkbox-inline{margin-top:0;margin-left:10px}input[type=radio][disabled],input[type=checkbox][disabled],input[type=radio].disabled,input[type=checkbox].disabled,fieldset[disabled] input[type=radio],fieldset[disabled] input[type=checkbox]{cursor:not-allowed}.radio-inline.disabled,.checkbox-inline.disabled,fieldset[disabled] .radio-inline,fieldset[disabled] .checkbox-inline{cursor:not-allowed}.radio.disabled label,.checkbox.disabled label,fieldset[disabled] .radio label,fieldset[disabled] .checkbox label{cursor:not-allowed}.form-control-static{padding-top:7px;padding-bottom:7px;margin-bottom:0}.form-control-static.input-lg,.form-control-static.input-sm{padding-right:0;padding-left:0}.input-sm,.form-horizontal .form-group-sm .form-control{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-sm{height:30px;line-height:30px}textarea.input-sm,select[multiple].input-sm{height:auto}.input-lg,.form-horizontal .form-group-lg .form-control{height:46px;padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-lg{height:46px;line-height:46px}textarea.input-lg,select[multiple].input-lg{height:auto}.has-feedback{position:relative}.has-feedback .form-control{padding-right:42.5px}.form-control-feedback{position:absolute;top:25px;right:0;z-index:2;display:block;width:34px;height:34px;line-height:34px;text-align:center}.input-lg+.form-control-feedback{width:46px;height:46px;line-height:46px}.input-sm+.form-control-feedback{width:30px;height:30px;line-height:30px}.has-success .help-block,.has-success .control-label,.has-success .radio,.has-success .checkbox,.has-success .radio-inline,.has-success .checkbox-inline{color:#3c763d}.has-success .form-control{border-color:#3c763d;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-success .form-control:focus{border-color:#2b542c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #67b168}.has-success .input-group-addon{color:#3c763d;background-color:#dff0d8;border-color:#3c763d}.has-success .form-control-feedback{color:#3c763d}.has-warning .help-block,.has-warning .control-label,.has-warning .radio,.has-warning .checkbox,.has-warning .radio-inline,.has-warning .checkbox-inline{color:#8a6d3b}.has-warning .form-control{border-color:#8a6d3b;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-warning .form-control:focus{border-color:#66512c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #c0a16b;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 
6px #c0a16b}.has-warning .input-group-addon{color:#8a6d3b;background-color:#fcf8e3;border-color:#8a6d3b}.has-warning .form-control-feedback{color:#8a6d3b}.has-error .help-block,.has-error .control-label,.has-error .radio,.has-error .checkbox,.has-error .radio-inline,.has-error .checkbox-inline{color:#a94442}.has-error .form-control{border-color:#a94442;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075);box-shadow:inset 0 1px 1px rgba(0,0,0,.075)}.has-error .form-control:focus{border-color:#843534;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483;box-shadow:inset 0 1px 1px rgba(0,0,0,.075),0 0 6px #ce8483}.has-error .input-group-addon{color:#a94442;background-color:#f2dede;border-color:#a94442}.has-error .form-control-feedback{color:#a94442}.has-feedback label.sr-only~.form-control-feedback{top:0}.help-block{display:block;margin-top:5px;margin-bottom:10px;color:#737373}@media (min-width:768px){.form-inline .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .input-group{display:inline-table;vertical-align:middle}.form-inline .input-group .input-group-addon,.form-inline .input-group .input-group-btn,.form-inline .input-group .form-control{width:auto}.form-inline .input-group>.form-control{width:100%}.form-inline .control-label{margin-bottom:0;vertical-align:middle}.form-inline .radio,.form-inline .checkbox{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.form-inline .radio label,.form-inline .checkbox label{padding-left:0}.form-inline .radio input[type=radio],.form-inline .checkbox input[type=checkbox]{position:relative;margin-left:0}.form-inline .has-feedback .form-control-feedback{top:0}}.form-horizontal .radio,.form-horizontal .checkbox,.form-horizontal .radio-inline,.form-horizontal .checkbox-inline{padding-top:7px;margin-top:0;margin-bottom:0}.form-horizontal .radio,.form-horizontal .checkbox{min-height:27px}.form-horizontal .form-group{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.form-horizontal .control-label{padding-top:7px;margin-bottom:0;text-align:right}}.form-horizontal .has-feedback .form-control-feedback{top:0;right:15px}@media (min-width:768px){.form-horizontal .form-group-lg .control-label{padding-top:14.3px}}@media (min-width:768px){.form-horizontal .form-group-sm .control-label{padding-top:6px}}.btn{display:inline-block;padding:6px 12px;margin-bottom:0;font-size:14px;font-weight:400;line-height:1.42857143;text-align:center;white-space:nowrap;vertical-align:middle;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-image:none;border:1px solid transparent;border-radius:4px}.btn:focus,.btn:active:focus,.btn.active:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.btn:hover,.btn:focus{color:#333;text-decoration:none}.btn:active,.btn.active{background-image:none;outline:0;-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn.disabled,.btn[disabled],fieldset[disabled] 
.btn{pointer-events:none;cursor:not-allowed;filter:alpha(opacity=65);-webkit-box-shadow:none;box-shadow:none;opacity:.65}.btn-default{color:#333;background-color:#fff;border-color:#ccc}.btn-default:hover,.btn-default:focus,.btn-default:active,.btn-default.active,.open>.dropdown-toggle.btn-default{color:#333;background-color:#e6e6e6;border-color:#adadad}.btn-default:active,.btn-default.active,.open>.dropdown-toggle.btn-default{background-image:none}.btn-default.disabled,.btn-default[disabled],fieldset[disabled] .btn-default,.btn-default.disabled:hover,.btn-default[disabled]:hover,fieldset[disabled] .btn-default:hover,.btn-default.disabled:focus,.btn-default[disabled]:focus,fieldset[disabled] .btn-default:focus,.btn-default.disabled:active,.btn-default[disabled]:active,fieldset[disabled] .btn-default:active,.btn-default.disabled.active,.btn-default[disabled].active,fieldset[disabled] .btn-default.active{background-color:#fff;border-color:#ccc}.btn-default .badge{color:#fff;background-color:#333}.btn-primary{color:#fff;background-color:#428bca;border-color:#357ebd}.btn-primary:hover,.btn-primary:focus,.btn-primary:active,.btn-primary.active,.open>.dropdown-toggle.btn-primary{color:#fff;background-color:#3071a9;border-color:#285e8e}.btn-primary:active,.btn-primary.active,.open>.dropdown-toggle.btn-primary{background-image:none}.btn-primary.disabled,.btn-primary[disabled],fieldset[disabled] .btn-primary,.btn-primary.disabled:hover,.btn-primary[disabled]:hover,fieldset[disabled] .btn-primary:hover,.btn-primary.disabled:focus,.btn-primary[disabled]:focus,fieldset[disabled] .btn-primary:focus,.btn-primary.disabled:active,.btn-primary[disabled]:active,fieldset[disabled] .btn-primary:active,.btn-primary.disabled.active,.btn-primary[disabled].active,fieldset[disabled] .btn-primary.active{background-color:#428bca;border-color:#357ebd}.btn-primary .badge{color:#428bca;background-color:#fff}.btn-success{color:#fff;background-color:#5cb85c;border-color:#4cae4c}.btn-success:hover,.btn-success:focus,.btn-success:active,.btn-success.active,.open>.dropdown-toggle.btn-success{color:#fff;background-color:#449d44;border-color:#398439}.btn-success:active,.btn-success.active,.open>.dropdown-toggle.btn-success{background-image:none}.btn-success.disabled,.btn-success[disabled],fieldset[disabled] .btn-success,.btn-success.disabled:hover,.btn-success[disabled]:hover,fieldset[disabled] .btn-success:hover,.btn-success.disabled:focus,.btn-success[disabled]:focus,fieldset[disabled] .btn-success:focus,.btn-success.disabled:active,.btn-success[disabled]:active,fieldset[disabled] .btn-success:active,.btn-success.disabled.active,.btn-success[disabled].active,fieldset[disabled] .btn-success.active{background-color:#5cb85c;border-color:#4cae4c}.btn-success .badge{color:#5cb85c;background-color:#fff}.btn-info{color:#fff;background-color:#5bc0de;border-color:#46b8da}.btn-info:hover,.btn-info:focus,.btn-info:active,.btn-info.active,.open>.dropdown-toggle.btn-info{color:#fff;background-color:#31b0d5;border-color:#269abc}.btn-info:active,.btn-info.active,.open>.dropdown-toggle.btn-info{background-image:none}.btn-info.disabled,.btn-info[disabled],fieldset[disabled] .btn-info,.btn-info.disabled:hover,.btn-info[disabled]:hover,fieldset[disabled] .btn-info:hover,.btn-info.disabled:focus,.btn-info[disabled]:focus,fieldset[disabled] .btn-info:focus,.btn-info.disabled:active,.btn-info[disabled]:active,fieldset[disabled] .btn-info:active,.btn-info.disabled.active,.btn-info[disabled].active,fieldset[disabled] 
.btn-info.active{background-color:#5bc0de;border-color:#46b8da}.btn-info .badge{color:#5bc0de;background-color:#fff}.btn-warning{color:#fff;background-color:#f0ad4e;border-color:#eea236}.btn-warning:hover,.btn-warning:focus,.btn-warning:active,.btn-warning.active,.open>.dropdown-toggle.btn-warning{color:#fff;background-color:#ec971f;border-color:#d58512}.btn-warning:active,.btn-warning.active,.open>.dropdown-toggle.btn-warning{background-image:none}.btn-warning.disabled,.btn-warning[disabled],fieldset[disabled] .btn-warning,.btn-warning.disabled:hover,.btn-warning[disabled]:hover,fieldset[disabled] .btn-warning:hover,.btn-warning.disabled:focus,.btn-warning[disabled]:focus,fieldset[disabled] .btn-warning:focus,.btn-warning.disabled:active,.btn-warning[disabled]:active,fieldset[disabled] .btn-warning:active,.btn-warning.disabled.active,.btn-warning[disabled].active,fieldset[disabled] .btn-warning.active{background-color:#f0ad4e;border-color:#eea236}.btn-warning .badge{color:#f0ad4e;background-color:#fff}.btn-danger{color:#fff;background-color:#d9534f;border-color:#d43f3a}.btn-danger:hover,.btn-danger:focus,.btn-danger:active,.btn-danger.active,.open>.dropdown-toggle.btn-danger{color:#fff;background-color:#c9302c;border-color:#ac2925}.btn-danger:active,.btn-danger.active,.open>.dropdown-toggle.btn-danger{background-image:none}.btn-danger.disabled,.btn-danger[disabled],fieldset[disabled] .btn-danger,.btn-danger.disabled:hover,.btn-danger[disabled]:hover,fieldset[disabled] .btn-danger:hover,.btn-danger.disabled:focus,.btn-danger[disabled]:focus,fieldset[disabled] .btn-danger:focus,.btn-danger.disabled:active,.btn-danger[disabled]:active,fieldset[disabled] .btn-danger:active,.btn-danger.disabled.active,.btn-danger[disabled].active,fieldset[disabled] .btn-danger.active{background-color:#d9534f;border-color:#d43f3a}.btn-danger .badge{color:#d9534f;background-color:#fff}.btn-link{font-weight:400;color:#428bca;cursor:pointer;border-radius:0}.btn-link,.btn-link:active,.btn-link[disabled],fieldset[disabled] .btn-link{background-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link,.btn-link:hover,.btn-link:focus,.btn-link:active{border-color:transparent}.btn-link:hover,.btn-link:focus{color:#2a6496;text-decoration:underline;background-color:transparent}.btn-link[disabled]:hover,fieldset[disabled] .btn-link:hover,.btn-link[disabled]:focus,fieldset[disabled] .btn-link:focus{color:#777;text-decoration:none}.btn-lg,.btn-group-lg>.btn{padding:10px 16px;font-size:18px;line-height:1.33;border-radius:6px}.btn-sm,.btn-group-sm>.btn{padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}.btn-xs,.btn-group-xs>.btn{padding:1px 5px;font-size:12px;line-height:1.5;border-radius:3px}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:5px}input[type=submit].btn-block,input[type=reset].btn-block,input[type=button].btn-block{width:100%}.fade{opacity:0;-webkit-transition:opacity .15s linear;-o-transition:opacity .15s linear;transition:opacity .15s linear}.fade.in{opacity:1}.collapse{display:none}.collapse.in{display:block}tr.collapse.in{display:table-row}tbody.collapse.in{display:table-row-group}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height .35s ease;-o-transition:height .35s ease;transition:height .35s ease}.caret{display:inline-block;width:0;height:0;margin-left:2px;vertical-align:middle;border-top:4px solid;border-right:4px solid transparent;border-left:4px solid 
transparent}.dropdown{position:relative}.dropdown-toggle:focus{outline:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:160px;padding:5px 0;margin:2px 0 0;font-size:14px;text-align:left;list-style:none;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.15);border-radius:4px;-webkit-box-shadow:0 6px 12px rgba(0,0,0,.175);box-shadow:0 6px 12px rgba(0,0,0,.175)}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu .divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.dropdown-menu>li>a{display:block;padding:3px 20px;clear:both;font-weight:400;line-height:1.42857143;color:#333;white-space:nowrap}.dropdown-menu>li>a:hover,.dropdown-menu>li>a:focus{color:#262626;text-decoration:none;background-color:#f5f5f5}.dropdown-menu>.active>a,.dropdown-menu>.active>a:hover,.dropdown-menu>.active>a:focus{color:#fff;text-decoration:none;background-color:#428bca;outline:0}.dropdown-menu>.disabled>a,.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{color:#777}.dropdown-menu>.disabled>a:hover,.dropdown-menu>.disabled>a:focus{text-decoration:none;cursor:not-allowed;background-color:transparent;background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled=false)}.open>.dropdown-menu{display:block}.open>a{outline:0}.dropdown-menu-right{right:0;left:auto}.dropdown-menu-left{right:auto;left:0}.dropdown-header{display:block;padding:3px 20px;font-size:12px;line-height:1.42857143;color:#777;white-space:nowrap}.dropdown-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:990}.pull-right>.dropdown-menu{right:0;left:auto}.dropup .caret,.navbar-fixed-bottom .dropdown .caret{content:"";border-top:0;border-bottom:4px solid}.dropup .dropdown-menu,.navbar-fixed-bottom .dropdown .dropdown-menu{top:auto;bottom:100%;margin-bottom:1px}@media (min-width:768px){.navbar-right .dropdown-menu{right:0;left:auto}.navbar-right .dropdown-menu-left{right:auto;left:0}}.btn-group,.btn-group-vertical{position:relative;display:inline-block;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;float:left}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover,.btn-group>.btn:focus,.btn-group-vertical>.btn:focus,.btn-group>.btn:active,.btn-group-vertical>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn.active{z-index:2}.btn-group>.btn:focus,.btn-group-vertical>.btn:focus{outline:0}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{margin-left:-5px}.btn-toolbar .btn-group,.btn-toolbar 
.input-group{float:left}.btn-toolbar>.btn,.btn-toolbar>.btn-group,.btn-toolbar>.input-group{margin-left:5px}.btn-group>.btn:not(:first-child):not(:last-child):not(.dropdown-toggle){border-radius:0}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:first-child:not(:last-child):not(.dropdown-toggle){border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:last-child:not(:first-child),.btn-group>.dropdown-toggle:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.btn-group>.btn-group{float:left}.btn-group>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group>.btn-group:first-child>.btn:last-child,.btn-group>.btn-group:first-child>.dropdown-toggle{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn-group:last-child>.btn:first-child{border-top-left-radius:0;border-bottom-left-radius:0}.btn-group .dropdown-toggle:active,.btn-group.open .dropdown-toggle{outline:0}.btn-group>.btn+.dropdown-toggle{padding-right:8px;padding-left:8px}.btn-group>.btn-lg+.dropdown-toggle{padding-right:12px;padding-left:12px}.btn-group.open .dropdown-toggle{-webkit-box-shadow:inset 0 3px 5px rgba(0,0,0,.125);box-shadow:inset 0 3px 5px rgba(0,0,0,.125)}.btn-group.open .dropdown-toggle.btn-link{-webkit-box-shadow:none;box-shadow:none}.btn .caret{margin-left:0}.btn-lg .caret{border-width:5px 5px 0;border-bottom-width:0}.dropup .btn-lg .caret{border-width:0 5px 5px}.btn-group-vertical>.btn,.btn-group-vertical>.btn-group,.btn-group-vertical>.btn-group>.btn{display:block;float:none;width:100%;max-width:100%}.btn-group-vertical>.btn-group>.btn{float:none}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:first-child):not(:last-child){border-radius:0}.btn-group-vertical>.btn:first-child:not(:last-child){border-top-right-radius:4px;border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:last-child:not(:first-child){border-top-left-radius:0;border-top-right-radius:0;border-bottom-left-radius:4px}.btn-group-vertical>.btn-group:not(:first-child):not(:last-child)>.btn{border-radius:0}.btn-group-vertical>.btn-group:first-child:not(:last-child)>.btn:last-child,.btn-group-vertical>.btn-group:first-child:not(:last-child)>.dropdown-toggle{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn-group:last-child:not(:first-child)>.btn:first-child{border-top-left-radius:0;border-top-right-radius:0}.btn-group-justified{display:table;width:100%;table-layout:fixed;border-collapse:separate}.btn-group-justified>.btn,.btn-group-justified>.btn-group{display:table-cell;float:none;width:1%}.btn-group-justified>.btn-group .btn{width:100%}.btn-group-justified>.btn-group .dropdown-menu{left:auto}[data-toggle=buttons]>.btn>input[type=radio],[data-toggle=buttons]>.btn>input[type=checkbox]{position:absolute;z-index:-1;filter:alpha(opacity=0);opacity:0}.input-group{position:relative;display:table;border-collapse:separate}.input-group[class*=col-]{float:none;padding-right:0;padding-left:0}.input-group .form-control{position:relative;z-index:2;float:left;width:100%;margin-bottom:0}.input-group-lg>.form-control,.input-group-lg>.input-group-addon,.input-group-lg>.input-group-btn>.btn{height:46px;padding:10px 
16px;font-size:18px;line-height:1.33;border-radius:6px}select.input-group-lg>.form-control,select.input-group-lg>.input-group-addon,select.input-group-lg>.input-group-btn>.btn{height:46px;line-height:46px}textarea.input-group-lg>.form-control,textarea.input-group-lg>.input-group-addon,textarea.input-group-lg>.input-group-btn>.btn,select[multiple].input-group-lg>.form-control,select[multiple].input-group-lg>.input-group-addon,select[multiple].input-group-lg>.input-group-btn>.btn{height:auto}.input-group-sm>.form-control,.input-group-sm>.input-group-addon,.input-group-sm>.input-group-btn>.btn{height:30px;padding:5px 10px;font-size:12px;line-height:1.5;border-radius:3px}select.input-group-sm>.form-control,select.input-group-sm>.input-group-addon,select.input-group-sm>.input-group-btn>.btn{height:30px;line-height:30px}textarea.input-group-sm>.form-control,textarea.input-group-sm>.input-group-addon,textarea.input-group-sm>.input-group-btn>.btn,select[multiple].input-group-sm>.form-control,select[multiple].input-group-sm>.input-group-addon,select[multiple].input-group-sm>.input-group-btn>.btn{height:auto}.input-group-addon,.input-group-btn,.input-group .form-control{display:table-cell}.input-group-addon:not(:first-child):not(:last-child),.input-group-btn:not(:first-child):not(:last-child),.input-group .form-control:not(:first-child):not(:last-child){border-radius:0}.input-group-addon,.input-group-btn{width:1%;white-space:nowrap;vertical-align:middle}.input-group-addon{padding:6px 12px;font-size:14px;font-weight:400;line-height:1;color:#555;text-align:center;background-color:#eee;border:1px solid #ccc;border-radius:4px}.input-group-addon.input-sm{padding:5px 10px;font-size:12px;border-radius:3px}.input-group-addon.input-lg{padding:10px 16px;font-size:18px;border-radius:6px}.input-group-addon input[type=radio],.input-group-addon input[type=checkbox]{margin-top:0}.input-group .form-control:first-child,.input-group-addon:first-child,.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group>.btn,.input-group-btn:first-child>.dropdown-toggle,.input-group-btn:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group-btn:last-child>.btn-group:not(:last-child)>.btn{border-top-right-radius:0;border-bottom-right-radius:0}.input-group-addon:first-child{border-right:0}.input-group .form-control:last-child,.input-group-addon:last-child,.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group>.btn,.input-group-btn:last-child>.dropdown-toggle,.input-group-btn:first-child>.btn:not(:first-child),.input-group-btn:first-child>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-addon:last-child{border-left:0}.input-group-btn{position:relative;font-size:0;white-space:nowrap}.input-group-btn>.btn{position:relative}.input-group-btn>.btn+.btn{margin-left:-1px}.input-group-btn>.btn:hover,.input-group-btn>.btn:focus,.input-group-btn>.btn:active{z-index:2}.input-group-btn:first-child>.btn,.input-group-btn:first-child>.btn-group{margin-right:-1px}.input-group-btn:last-child>.btn,.input-group-btn:last-child>.btn-group{margin-left:-1px}.nav{padding-left:0;margin-bottom:0;list-style:none}.nav>li{position:relative;display:block}.nav>li>a{position:relative;display:block;padding:10px 15px}.nav>li>a:hover,.nav>li>a:focus{text-decoration:none;background-color:#eee}.nav>li.disabled>a{color:#777}.nav>li.disabled>a:hover,.nav>li.disabled>a:focus{color:#777;text-decoration:none;cursor:not-allowed;background-color:transparent}.nav .open>a,.nav 
.open>a:hover,.nav .open>a:focus{background-color:#eee;border-color:#428bca}.nav .nav-divider{height:1px;margin:9px 0;overflow:hidden;background-color:#e5e5e5}.nav>li>a>img{max-width:none}.nav-tabs{border-bottom:1px solid #ddd}.nav-tabs>li{float:left;margin-bottom:-1px}.nav-tabs>li>a{margin-right:2px;line-height:1.42857143;border:1px solid transparent;border-radius:4px 4px 0 0}.nav-tabs>li>a:hover{border-color:#eee #eee #ddd}.nav-tabs>li.active>a,.nav-tabs>li.active>a:hover,.nav-tabs>li.active>a:focus{color:#555;cursor:default;background-color:#fff;border:1px solid #ddd;border-bottom-color:transparent}.nav-tabs.nav-justified{width:100%;border-bottom:0}.nav-tabs.nav-justified>li{float:none}.nav-tabs.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-tabs.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-tabs.nav-justified>li{display:table-cell;width:1%}.nav-tabs.nav-justified>li>a{margin-bottom:0}}.nav-tabs.nav-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border:1px solid #ddd}@media (min-width:768px){.nav-tabs.nav-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs.nav-justified>.active>a,.nav-tabs.nav-justified>.active>a:hover,.nav-tabs.nav-justified>.active>a:focus{border-bottom-color:#fff}}.nav-pills>li{float:left}.nav-pills>li>a{border-radius:4px}.nav-pills>li+li{margin-left:2px}.nav-pills>li.active>a,.nav-pills>li.active>a:hover,.nav-pills>li.active>a:focus{color:#fff;background-color:#428bca}.nav-stacked>li{float:none}.nav-stacked>li+li{margin-top:2px;margin-left:0}.nav-justified{width:100%}.nav-justified>li{float:none}.nav-justified>li>a{margin-bottom:5px;text-align:center}.nav-justified>.dropdown .dropdown-menu{top:auto;left:auto}@media (min-width:768px){.nav-justified>li{display:table-cell;width:1%}.nav-justified>li>a{margin-bottom:0}}.nav-tabs-justified{border-bottom:0}.nav-tabs-justified>li>a{margin-right:0;border-radius:4px}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border:1px solid #ddd}@media (min-width:768px){.nav-tabs-justified>li>a{border-bottom:1px solid #ddd;border-radius:4px 4px 0 0}.nav-tabs-justified>.active>a,.nav-tabs-justified>.active>a:hover,.nav-tabs-justified>.active>a:focus{border-bottom-color:#fff}}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.nav-tabs .dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.navbar{position:relative;min-height:50px;margin-bottom:20px;border:1px solid transparent}@media (min-width:768px){.navbar{border-radius:4px}}@media (min-width:768px){.navbar-header{float:left}}.navbar-collapse{padding-right:15px;padding-left:15px;overflow-x:visible;-webkit-overflow-scrolling:touch;border-top:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1)}.navbar-collapse.in{overflow-y:auto}@media (min-width:768px){.navbar-collapse{width:auto;border-top:0;-webkit-box-shadow:none;box-shadow:none}.navbar-collapse.collapse{display:block!important;height:auto!important;padding-bottom:0;overflow:visible!important}.navbar-collapse.in{overflow-y:visible}.navbar-fixed-top .navbar-collapse,.navbar-static-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{padding-right:0;padding-left:0}}.navbar-fixed-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{max-height:340px}@media 
(max-width:480px) and (orientation:landscape){.navbar-fixed-top .navbar-collapse,.navbar-fixed-bottom .navbar-collapse{max-height:200px}}.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:-15px;margin-left:-15px}@media (min-width:768px){.container>.navbar-header,.container-fluid>.navbar-header,.container>.navbar-collapse,.container-fluid>.navbar-collapse{margin-right:0;margin-left:0}}.navbar-static-top{z-index:1000;border-width:0 0 1px}@media (min-width:768px){.navbar-static-top{border-radius:0}}.navbar-fixed-top,.navbar-fixed-bottom{position:fixed;right:0;left:0;z-index:1030;-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}@media (min-width:768px){.navbar-fixed-top,.navbar-fixed-bottom{border-radius:0}}.navbar-fixed-top{top:0;border-width:0 0 1px}.navbar-fixed-bottom{bottom:0;margin-bottom:0;border-width:1px 0 0}.navbar-brand{float:left;height:50px;padding:15px 15px;font-size:18px;line-height:20px}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}@media (min-width:768px){.navbar>.container .navbar-brand,.navbar>.container-fluid .navbar-brand{margin-left:-15px}}.navbar-toggle{position:relative;float:right;padding:9px 10px;margin-top:8px;margin-right:15px;margin-bottom:8px;background-color:transparent;background-image:none;border:1px solid transparent;border-radius:4px}.navbar-toggle:focus{outline:0}.navbar-toggle .icon-bar{display:block;width:22px;height:2px;border-radius:1px}.navbar-toggle .icon-bar+.icon-bar{margin-top:4px}@media (min-width:768px){.navbar-toggle{display:none}}.navbar-nav{margin:7.5px -15px}.navbar-nav>li>a{padding-top:10px;padding-bottom:10px;line-height:20px}@media (max-width:767px){.navbar-nav .open .dropdown-menu{position:static;float:none;width:auto;margin-top:0;background-color:transparent;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-nav .open .dropdown-menu>li>a,.navbar-nav .open .dropdown-menu .dropdown-header{padding:5px 15px 5px 25px}.navbar-nav .open .dropdown-menu>li>a{line-height:20px}.navbar-nav .open .dropdown-menu>li>a:hover,.navbar-nav .open .dropdown-menu>li>a:focus{background-image:none}}@media (min-width:768px){.navbar-nav{float:left;margin:0}.navbar-nav>li{float:left}.navbar-nav>li>a{padding-top:15px;padding-bottom:15px}.navbar-nav.navbar-right:last-child{margin-right:-15px}}@media (min-width:768px){.navbar-left{float:left!important}.navbar-right{float:right!important}}.navbar-form{padding:10px 15px;margin-top:8px;margin-right:-15px;margin-bottom:8px;margin-left:-15px;border-top:1px solid transparent;border-bottom:1px solid transparent;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 1px 0 rgba(255,255,255,.1),0 1px 0 rgba(255,255,255,.1)}@media (min-width:768px){.navbar-form .form-group{display:inline-block;margin-bottom:0;vertical-align:middle}.navbar-form .form-control{display:inline-block;width:auto;vertical-align:middle}.navbar-form .input-group{display:inline-table;vertical-align:middle}.navbar-form .input-group .input-group-addon,.navbar-form .input-group .input-group-btn,.navbar-form .input-group .form-control{width:auto}.navbar-form .input-group>.form-control{width:100%}.navbar-form .control-label{margin-bottom:0;vertical-align:middle}.navbar-form .radio,.navbar-form .checkbox{display:inline-block;margin-top:0;margin-bottom:0;vertical-align:middle}.navbar-form .radio label,.navbar-form .checkbox label{padding-left:0}.navbar-form .radio 
input[type=radio],.navbar-form .checkbox input[type=checkbox]{position:relative;margin-left:0}.navbar-form .has-feedback .form-control-feedback{top:0}}@media (max-width:767px){.navbar-form .form-group{margin-bottom:5px}}@media (min-width:768px){.navbar-form{width:auto;padding-top:0;padding-bottom:0;margin-right:0;margin-left:0;border:0;-webkit-box-shadow:none;box-shadow:none}.navbar-form.navbar-right:last-child{margin-right:-15px}}.navbar-nav>li>.dropdown-menu{margin-top:0;border-top-left-radius:0;border-top-right-radius:0}.navbar-fixed-bottom .navbar-nav>li>.dropdown-menu{border-bottom-right-radius:0;border-bottom-left-radius:0}.navbar-btn{margin-top:8px;margin-bottom:8px}.navbar-btn.btn-sm{margin-top:10px;margin-bottom:10px}.navbar-btn.btn-xs{margin-top:14px;margin-bottom:14px}.navbar-text{margin-top:15px;margin-bottom:15px}@media (min-width:768px){.navbar-text{float:left;margin-right:15px;margin-left:15px}.navbar-text.navbar-right:last-child{margin-right:0}}.navbar-default{background-color:#f8f8f8;border-color:#e7e7e7}.navbar-default .navbar-brand{color:#777}.navbar-default .navbar-brand:hover,.navbar-default .navbar-brand:focus{color:#5e5e5e;background-color:transparent}.navbar-default .navbar-text{color:#777}.navbar-default .navbar-nav>li>a{color:#777}.navbar-default .navbar-nav>li>a:hover,.navbar-default .navbar-nav>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav>.active>a,.navbar-default .navbar-nav>.active>a:hover,.navbar-default .navbar-nav>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav>.disabled>a,.navbar-default .navbar-nav>.disabled>a:hover,.navbar-default .navbar-nav>.disabled>a:focus{color:#ccc;background-color:transparent}.navbar-default .navbar-toggle{border-color:#ddd}.navbar-default .navbar-toggle:hover,.navbar-default .navbar-toggle:focus{background-color:#ddd}.navbar-default .navbar-toggle .icon-bar{background-color:#888}.navbar-default .navbar-collapse,.navbar-default .navbar-form{border-color:#e7e7e7}.navbar-default .navbar-nav>.open>a,.navbar-default .navbar-nav>.open>a:hover,.navbar-default .navbar-nav>.open>a:focus{color:#555;background-color:#e7e7e7}@media (max-width:767px){.navbar-default .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-default .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>li>a:focus{color:#333;background-color:transparent}.navbar-default .navbar-nav .open .dropdown-menu>.active>a,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.active>a:focus{color:#555;background-color:#e7e7e7}.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-default .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#ccc;background-color:transparent}}.navbar-default .navbar-link{color:#777}.navbar-default .navbar-link:hover{color:#333}.navbar-default .btn-link{color:#777}.navbar-default .btn-link:hover,.navbar-default .btn-link:focus{color:#333}.navbar-default .btn-link[disabled]:hover,fieldset[disabled] .navbar-default .btn-link:hover,.navbar-default .btn-link[disabled]:focus,fieldset[disabled] .navbar-default .btn-link:focus{color:#ccc}.navbar-inverse{background-color:#222;border-color:#080808}.navbar-inverse .navbar-brand{color:#777}.navbar-inverse .navbar-brand:hover,.navbar-inverse .navbar-brand:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-text{color:#777}.navbar-inverse 
.navbar-nav>li>a{color:#777}.navbar-inverse .navbar-nav>li>a:hover,.navbar-inverse .navbar-nav>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav>.active>a,.navbar-inverse .navbar-nav>.active>a:hover,.navbar-inverse .navbar-nav>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav>.disabled>a,.navbar-inverse .navbar-nav>.disabled>a:hover,.navbar-inverse .navbar-nav>.disabled>a:focus{color:#444;background-color:transparent}.navbar-inverse .navbar-toggle{border-color:#333}.navbar-inverse .navbar-toggle:hover,.navbar-inverse .navbar-toggle:focus{background-color:#333}.navbar-inverse .navbar-toggle .icon-bar{background-color:#fff}.navbar-inverse .navbar-collapse,.navbar-inverse .navbar-form{border-color:#101010}.navbar-inverse .navbar-nav>.open>a,.navbar-inverse .navbar-nav>.open>a:hover,.navbar-inverse .navbar-nav>.open>a:focus{color:#fff;background-color:#080808}@media (max-width:767px){.navbar-inverse .navbar-nav .open .dropdown-menu>.dropdown-header{border-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu .divider{background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a{color:#777}.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>li>a:focus{color:#fff;background-color:transparent}.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.active>a:focus{color:#fff;background-color:#080808}.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:hover,.navbar-inverse .navbar-nav .open .dropdown-menu>.disabled>a:focus{color:#444;background-color:transparent}}.navbar-inverse .navbar-link{color:#777}.navbar-inverse .navbar-link:hover{color:#fff}.navbar-inverse .btn-link{color:#777}.navbar-inverse .btn-link:hover,.navbar-inverse .btn-link:focus{color:#fff}.navbar-inverse .btn-link[disabled]:hover,fieldset[disabled] .navbar-inverse .btn-link:hover,.navbar-inverse .btn-link[disabled]:focus,fieldset[disabled] .navbar-inverse .btn-link:focus{color:#444}.breadcrumb{padding:8px 15px;margin-bottom:20px;list-style:none;background-color:#f5f5f5;border-radius:4px}.breadcrumb>li{display:inline-block}.breadcrumb>li+li:before{padding:0 5px;color:#ccc;content:"/\00a0"}.breadcrumb>.active{color:#777}.pagination{display:inline-block;padding-left:0;margin:20px 0;border-radius:4px}.pagination>li{display:inline}.pagination>li>a,.pagination>li>span{position:relative;float:left;padding:6px 12px;margin-left:-1px;line-height:1.42857143;color:#428bca;text-decoration:none;background-color:#fff;border:1px solid 
#ddd}.pagination>li:first-child>a,.pagination>li:first-child>span{margin-left:0;border-top-left-radius:4px;border-bottom-left-radius:4px}.pagination>li:last-child>a,.pagination>li:last-child>span{border-top-right-radius:4px;border-bottom-right-radius:4px}.pagination>li>a:hover,.pagination>li>span:hover,.pagination>li>a:focus,.pagination>li>span:focus{color:#2a6496;background-color:#eee;border-color:#ddd}.pagination>.active>a,.pagination>.active>span,.pagination>.active>a:hover,.pagination>.active>span:hover,.pagination>.active>a:focus,.pagination>.active>span:focus{z-index:2;color:#fff;cursor:default;background-color:#428bca;border-color:#428bca}.pagination>.disabled>span,.pagination>.disabled>span:hover,.pagination>.disabled>span:focus,.pagination>.disabled>a,.pagination>.disabled>a:hover,.pagination>.disabled>a:focus{color:#777;cursor:not-allowed;background-color:#fff;border-color:#ddd}.pagination-lg>li>a,.pagination-lg>li>span{padding:10px 16px;font-size:18px}.pagination-lg>li:first-child>a,.pagination-lg>li:first-child>span{border-top-left-radius:6px;border-bottom-left-radius:6px}.pagination-lg>li:last-child>a,.pagination-lg>li:last-child>span{border-top-right-radius:6px;border-bottom-right-radius:6px}.pagination-sm>li>a,.pagination-sm>li>span{padding:5px 10px;font-size:12px}.pagination-sm>li:first-child>a,.pagination-sm>li:first-child>span{border-top-left-radius:3px;border-bottom-left-radius:3px}.pagination-sm>li:last-child>a,.pagination-sm>li:last-child>span{border-top-right-radius:3px;border-bottom-right-radius:3px}.pager{padding-left:0;margin:20px 0;text-align:center;list-style:none}.pager li{display:inline}.pager li>a,.pager li>span{display:inline-block;padding:5px 14px;background-color:#fff;border:1px solid #ddd;border-radius:15px}.pager li>a:hover,.pager li>a:focus{text-decoration:none;background-color:#eee}.pager .next>a,.pager .next>span{float:right}.pager .previous>a,.pager .previous>span{float:left}.pager .disabled>a,.pager .disabled>a:hover,.pager .disabled>a:focus,.pager .disabled>span{color:#777;cursor:not-allowed;background-color:#fff}.label{display:inline;padding:.2em .6em .3em;font-size:75%;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:.25em}a.label:hover,a.label:focus{color:#fff;text-decoration:none;cursor:pointer}.label:empty{display:none}.btn .label{position:relative;top:-1px}.label-default{background-color:#777}.label-default[href]:hover,.label-default[href]:focus{background-color:#5e5e5e}.label-primary{background-color:#428bca}.label-primary[href]:hover,.label-primary[href]:focus{background-color:#3071a9}.label-success{background-color:#5cb85c}.label-success[href]:hover,.label-success[href]:focus{background-color:#449d44}.label-info{background-color:#5bc0de}.label-info[href]:hover,.label-info[href]:focus{background-color:#31b0d5}.label-warning{background-color:#f0ad4e}.label-warning[href]:hover,.label-warning[href]:focus{background-color:#ec971f}.label-danger{background-color:#d9534f}.label-danger[href]:hover,.label-danger[href]:focus{background-color:#c9302c}.badge{display:inline-block;min-width:10px;padding:3px 7px;font-size:12px;font-weight:700;line-height:1;color:#fff;text-align:center;white-space:nowrap;vertical-align:baseline;background-color:#777;border-radius:10px}.badge:empty{display:none}.btn .badge{position:relative;top:-1px}.btn-xs .badge{top:0;padding:1px 
5px}a.badge:hover,a.badge:focus{color:#fff;text-decoration:none;cursor:pointer}a.list-group-item.active>.badge,.nav-pills>.active>a>.badge{color:#428bca;background-color:#fff}.nav-pills>li>a>.badge{margin-left:3px}.jumbotron{padding:30px;margin-bottom:30px;color:inherit;background-color:#eee}.jumbotron h1,.jumbotron .h1{color:inherit}.jumbotron p{margin-bottom:15px;font-size:21px;font-weight:200}.jumbotron>hr{border-top-color:#d5d5d5}.container .jumbotron{border-radius:6px}.jumbotron .container{max-width:100%}@media screen and (min-width:768px){.jumbotron{padding-top:48px;padding-bottom:48px}.container .jumbotron{padding-right:60px;padding-left:60px}.jumbotron h1,.jumbotron .h1{font-size:63px}}.thumbnail{display:block;padding:4px;margin-bottom:20px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.thumbnail>img,.thumbnail a>img{margin-right:auto;margin-left:auto}a.thumbnail:hover,a.thumbnail:focus,a.thumbnail.active{border-color:#428bca}.thumbnail .caption{padding:9px;color:#333}.alert{padding:15px;margin-bottom:20px;border:1px solid transparent;border-radius:4px}.alert h4{margin-top:0;color:inherit}.alert .alert-link{font-weight:700}.alert>p,.alert>ul{margin-bottom:0}.alert>p+p{margin-top:5px}.alert-dismissable,.alert-dismissible{padding-right:35px}.alert-dismissable .close,.alert-dismissible .close{position:relative;top:-2px;right:-21px;color:inherit}.alert-success{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.alert-success hr{border-top-color:#c9e2b3}.alert-success .alert-link{color:#2b542c}.alert-info{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.alert-info hr{border-top-color:#a6e1ec}.alert-info .alert-link{color:#245269}.alert-warning{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.alert-warning hr{border-top-color:#f7e1b5}.alert-warning .alert-link{color:#66512c}.alert-danger{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.alert-danger hr{border-top-color:#e4b9c0}.alert-danger .alert-link{color:#843534}@-webkit-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@-o-keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:40px 0}to{background-position:0 0}}.progress{height:20px;margin-bottom:20px;overflow:hidden;background-color:#f5f5f5;border-radius:4px;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,.1);box-shadow:inset 0 1px 2px rgba(0,0,0,.1)}.progress-bar{float:left;width:0;height:100%;font-size:12px;line-height:20px;color:#fff;text-align:center;background-color:#428bca;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);box-shadow:inset 0 -1px 0 rgba(0,0,0,.15);-webkit-transition:width .6s ease;-o-transition:width .6s ease;transition:width .6s ease}.progress-striped .progress-bar,.progress-bar-striped{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 
75%,transparent);-webkit-background-size:40px 40px;background-size:40px 40px}.progress.active .progress-bar,.progress-bar.active{-webkit-animation:progress-bar-stripes 2s linear infinite;-o-animation:progress-bar-stripes 2s linear infinite;animation:progress-bar-stripes 2s linear infinite}.progress-bar[aria-valuenow="1"],.progress-bar[aria-valuenow="2"]{min-width:30px}.progress-bar[aria-valuenow="0"]{min-width:30px;color:#777;background-color:transparent;background-image:none;-webkit-box-shadow:none;box-shadow:none}.progress-bar-success{background-color:#5cb85c}.progress-striped .progress-bar-success{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-info{background-color:#5bc0de}.progress-striped .progress-bar-info{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-warning{background-color:#f0ad4e}.progress-striped .progress-bar-warning{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.progress-bar-danger{background-color:#d9534f}.progress-striped .progress-bar-danger{background-image:-webkit-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:-o-linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent);background-image:linear-gradient(45deg,rgba(255,255,255,.15) 25%,transparent 25%,transparent 50%,rgba(255,255,255,.15) 50%,rgba(255,255,255,.15) 75%,transparent 75%,transparent)}.media,.media-body{overflow:hidden;zoom:1}.media,.media .media{margin-top:15px}.media:first-child{margin-top:0}.media-object{display:block}.media-heading{margin:0 0 5px}.media>.pull-left{margin-right:10px}.media>.pull-right{margin-left:10px}.media-list{padding-left:0;list-style:none}.list-group{padding-left:0;margin-bottom:20px}.list-group-item{position:relative;display:block;padding:10px 15px;margin-bottom:-1px;background-color:#fff;border:1px solid 
#ddd}.list-group-item:first-child{border-top-left-radius:4px;border-top-right-radius:4px}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.list-group-item>.badge{float:right}.list-group-item>.badge+.badge{margin-right:5px}a.list-group-item{color:#555}a.list-group-item .list-group-item-heading{color:#333}a.list-group-item:hover,a.list-group-item:focus{color:#555;text-decoration:none;background-color:#f5f5f5}.list-group-item.disabled,.list-group-item.disabled:hover,.list-group-item.disabled:focus{color:#777;background-color:#eee}.list-group-item.disabled .list-group-item-heading,.list-group-item.disabled:hover .list-group-item-heading,.list-group-item.disabled:focus .list-group-item-heading{color:inherit}.list-group-item.disabled .list-group-item-text,.list-group-item.disabled:hover .list-group-item-text,.list-group-item.disabled:focus .list-group-item-text{color:#777}.list-group-item.active,.list-group-item.active:hover,.list-group-item.active:focus{z-index:2;color:#fff;background-color:#428bca;border-color:#428bca}.list-group-item.active .list-group-item-heading,.list-group-item.active:hover .list-group-item-heading,.list-group-item.active:focus .list-group-item-heading,.list-group-item.active .list-group-item-heading>small,.list-group-item.active:hover .list-group-item-heading>small,.list-group-item.active:focus .list-group-item-heading>small,.list-group-item.active .list-group-item-heading>.small,.list-group-item.active:hover .list-group-item-heading>.small,.list-group-item.active:focus .list-group-item-heading>.small{color:inherit}.list-group-item.active .list-group-item-text,.list-group-item.active:hover .list-group-item-text,.list-group-item.active:focus .list-group-item-text{color:#e1edf7}.list-group-item-success{color:#3c763d;background-color:#dff0d8}a.list-group-item-success{color:#3c763d}a.list-group-item-success .list-group-item-heading{color:inherit}a.list-group-item-success:hover,a.list-group-item-success:focus{color:#3c763d;background-color:#d0e9c6}a.list-group-item-success.active,a.list-group-item-success.active:hover,a.list-group-item-success.active:focus{color:#fff;background-color:#3c763d;border-color:#3c763d}.list-group-item-info{color:#31708f;background-color:#d9edf7}a.list-group-item-info{color:#31708f}a.list-group-item-info .list-group-item-heading{color:inherit}a.list-group-item-info:hover,a.list-group-item-info:focus{color:#31708f;background-color:#c4e3f3}a.list-group-item-info.active,a.list-group-item-info.active:hover,a.list-group-item-info.active:focus{color:#fff;background-color:#31708f;border-color:#31708f}.list-group-item-warning{color:#8a6d3b;background-color:#fcf8e3}a.list-group-item-warning{color:#8a6d3b}a.list-group-item-warning .list-group-item-heading{color:inherit}a.list-group-item-warning:hover,a.list-group-item-warning:focus{color:#8a6d3b;background-color:#faf2cc}a.list-group-item-warning.active,a.list-group-item-warning.active:hover,a.list-group-item-warning.active:focus{color:#fff;background-color:#8a6d3b;border-color:#8a6d3b}.list-group-item-danger{color:#a94442;background-color:#f2dede}a.list-group-item-danger{color:#a94442}a.list-group-item-danger 
.list-group-item-heading{color:inherit}a.list-group-item-danger:hover,a.list-group-item-danger:focus{color:#a94442;background-color:#ebcccc}a.list-group-item-danger.active,a.list-group-item-danger.active:hover,a.list-group-item-danger.active:focus{color:#fff;background-color:#a94442;border-color:#a94442}.list-group-item-heading{margin-top:0;margin-bottom:5px}.list-group-item-text{margin-bottom:0;line-height:1.3}.panel{margin-bottom:20px;background-color:#fff;border:1px solid transparent;border-radius:4px;-webkit-box-shadow:0 1px 1px rgba(0,0,0,.05);box-shadow:0 1px 1px rgba(0,0,0,.05)}.panel-body{padding:15px}.panel-heading{padding:10px 15px;border-bottom:1px solid transparent;border-top-left-radius:3px;border-top-right-radius:3px}.panel-heading>.dropdown .dropdown-toggle{color:inherit}.panel-title{margin-top:0;margin-bottom:0;font-size:16px;color:inherit}.panel-title>a{color:inherit}.panel-footer{padding:10px 15px;background-color:#f5f5f5;border-top:1px solid #ddd;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.list-group{margin-bottom:0}.panel>.list-group .list-group-item{border-width:1px 0;border-radius:0}.panel>.list-group:first-child .list-group-item:first-child{border-top:0;border-top-left-radius:3px;border-top-right-radius:3px}.panel>.list-group:last-child .list-group-item:last-child{border-bottom:0;border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel-heading+.list-group .list-group-item:first-child{border-top-width:0}.list-group+.panel-footer{border-top-width:0}.panel>.table,.panel>.table-responsive>.table,.panel>.panel-collapse>.table{margin-bottom:0}.panel>.table:first-child,.panel>.table-responsive:first-child>.table:first-child{border-top-left-radius:3px;border-top-right-radius:3px}.panel>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:first-child,.panel>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:first-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:first-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:first-child{border-top-left-radius:3px}.panel>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child td:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child td:last-child,.panel>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>thead:first-child>tr:first-child th:last-child,.panel>.table:first-child>tbody:first-child>tr:first-child th:last-child,.panel>.table-responsive:first-child>.table:first-child>tbody:first-child>tr:first-child th:last-child{border-top-right-radius:3px}.panel>.table:last-child,.panel>.table-responsive:last-child>.table:last-child{border-bottom-right-radius:3px;border-bottom-left-radius:3px}.panel>.table:last-child>tbody:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child 
td:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:first-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:first-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:first-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:first-child{border-bottom-left-radius:3px}.panel>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child td:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child td:last-child,.panel>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tbody:last-child>tr:last-child th:last-child,.panel>.table:last-child>tfoot:last-child>tr:last-child th:last-child,.panel>.table-responsive:last-child>.table:last-child>tfoot:last-child>tr:last-child th:last-child{border-bottom-right-radius:3px}.panel>.panel-body+.table,.panel>.panel-body+.table-responsive{border-top:1px solid #ddd}.panel>.table>tbody:first-child>tr:first-child th,.panel>.table>tbody:first-child>tr:first-child td{border-top:0}.panel>.table-bordered,.panel>.table-responsive>.table-bordered{border:0}.panel>.table-bordered>thead>tr>th:first-child,.panel>.table-responsive>.table-bordered>thead>tr>th:first-child,.panel>.table-bordered>tbody>tr>th:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:first-child,.panel>.table-bordered>tfoot>tr>th:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:first-child,.panel>.table-bordered>thead>tr>td:first-child,.panel>.table-responsive>.table-bordered>thead>tr>td:first-child,.panel>.table-bordered>tbody>tr>td:first-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:first-child,.panel>.table-bordered>tfoot>tr>td:first-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:first-child{border-left:0}.panel>.table-bordered>thead>tr>th:last-child,.panel>.table-responsive>.table-bordered>thead>tr>th:last-child,.panel>.table-bordered>tbody>tr>th:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>th:last-child,.panel>.table-bordered>tfoot>tr>th:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>th:last-child,.panel>.table-bordered>thead>tr>td:last-child,.panel>.table-responsive>.table-bordered>thead>tr>td:last-child,.panel>.table-bordered>tbody>tr>td:last-child,.panel>.table-responsive>.table-bordered>tbody>tr>td:last-child,.panel>.table-bordered>tfoot>tr>td:last-child,.panel>.table-responsive>.table-bordered>tfoot>tr>td:last-child{border-right:0}.panel>.table-bordered>thead>tr:first-child>td,.panel>.table-responsive>.table-bordered>thead>tr:first-child>td,.panel>.table-bordered>tbody>tr:first-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>td,.panel>.table-bordered>thead>tr:first-child>th,.panel>.table-responsive>.table-bordered>thead>tr:first-child>th,.panel>.table-bordered>tbody>tr:first-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:first-child>th{border-bottom:0}.panel>.table-bordered>tbody>tr:last-child>td,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>td,.panel>.table-bordered>tfoot>tr:last-child>td,.panel>.table-responsive>.table-bo
rdered>tfoot>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#428bca}.panel-primary>.panel-heading .badge{color:#428bca;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive iframe,.embed-responsive embed,.embed-responsive object{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well 
blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate3d(0,-25%,0);-o-transform:translate3d(0,-25%,0);transform:translate3d(0,-25%,0)}.modal.in .modal-dialog{-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{min-height:16.43px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-size:12px;line-height:1.4;visibility:visible;filter:alpha(opacity=0);opacity:0}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{right:5px;bottom:0;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom 
.tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;text-align:left;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:400;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', 
endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:hover,.carousel-control:focus{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right{position:absolute;top:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .glyphicon-chevron-left{left:50%;margin-left:-10px}.carousel-control .icon-next,.carousel-control .glyphicon-chevron-right{right:50%;margin-right:-10px}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;margin-top:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000 \9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-15px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-15px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after,.dl-horizontal dd:before,.dl-horizontal dd:after,.container:before,.container:after,.container-fluid:before,.container-fluid:after,.row:before,.row:after,.form-horizontal .form-group:before,.form-horizontal .form-group:after,.btn-toolbar:before,.btn-toolbar:after,.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after,.nav:before,.nav:after,.navbar:before,.navbar:after,.navbar-header:before,.navbar-header:after,.navbar-collapse:before,.navbar-collapse:after,.pager:before,.pager:after,.panel-body:before,.panel-body:after,.modal-footer:before,.modal-footer:after{display:table;content:" "}.clearfix:after,.dl-horizontal dd:after,.container:after,.container-fluid:after,.row:after,.form-horizontal 
.form-group:after,.btn-toolbar:after,.btn-group-vertical>.btn-group:after,.nav:after,.navbar:after,.navbar-header:after,.navbar-collapse:after,.pager:after,.panel-body:after,.modal-footer:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important;visibility:hidden!important}.affix{position:fixed;-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}@-ms-viewport{width:device-width}.visible-xs,.visible-sm,.visible-md,.visible-lg{display:none!important}.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media 
print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} ++ *//*! normalize.css v3.0.1 | MIT License | git.io/normalize */html{font-family:sans-serif;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}article,aside,details,figcaption,figure,footer,header,hgroup,main,nav,section,summary{display:block}audio,canvas,progress,video{display:inline-block;vertical-align:baseline}audio:not([controls]){display:none;height:0}[hidden],template{display:none}a{background:0 0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}dfn{font-style:italic}h1{margin:.67em 0;font-size:2em}mark{color:#000;background:#ff0}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}img{border:0}svg:not(:root){overflow:hidden}figure{margin:1em 40px}hr{height:0;-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box}pre{overflow:auto}code,kbd,pre,samp{font-family:monospace,monospace;font-size:1em}button,input,optgroup,select,textarea{margin:0;font:inherit;color:inherit}button{overflow:visible}button,select{text-transform:none}button,html input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer}button[disabled],html input[disabled]{cursor:default}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}input{line-height:normal}input[type=checkbox],input[type=radio]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;padding:0}input[type=number]::-webkit-inner-spin-button,input[type=number]::-webkit-outer-spin-button{height:auto}input[type=search]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}fieldset{padding:.35em .625em .75em;margin:0 2px;border:1px solid silver}legend{padding:0;border:0}textarea{overflow:auto}optgroup{font-weight:700}table{border-spacing:0;border-collapse:collapse}td,th{padding:0}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;-webkit-box-shadow:none!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid #999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}select{background:#fff!important}.navbar{display:none}.table td,.table th{background-color:#fff!important}.btn>.caret,.dropup>.btn>.caret{border-top-color:#000!important}.label{border:1px solid #000}.table{border-collapse:collapse!important}.table-bordered th,.table-bordered td{border:1px solid #ddd!important}}@font-face{font-family:'Glyphicons Halflings';src:url(../fonts/glyphicons-halflings-regular.eot);src:url(../fonts/glyphicons-halflings-regular.eot?#iefix) format('embedded-opentype'),url(../fonts/glyphicons-halflings-regular.woff) format('woff'),url(../fonts/glyphicons-halflings-regular.ttf) 
format('truetype'),url(../fonts/glyphicons-halflings-regular.svg#glyphicons_halflingsregular) format('svg')}.glyphicon{position:relative;top:1px;display:inline-block;font-family:'Glyphicons Halflings';font-style:normal;font-weight:400;line-height:1;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.glyphicon-asterisk:before{content:"\2a"}.glyphicon-plus:before{content:"\2b"}.glyphicon-euro:before{content:"\20ac"}.glyphicon-minus:before{content:"\2212"}.glyphicon-cloud:before{content:"\2601"}.glyphicon-envelope:before{content:"\2709"}.glyphicon-pencil:before{content:"\270f"}.glyphicon-glass:before{content:"\e001"}.glyphicon-music:before{content:"\e002"}.glyphicon-search:before{content:"\e003"}.glyphicon-heart:before{content:"\e005"}.glyphicon-star:before{content:"\e006"}.glyphicon-star-empty:before{content:"\e007"}.glyphicon-user:before{content:"\e008"}.glyphicon-film:before{content:"\e009"}.glyphicon-th-large:before{content:"\e010"}.glyphicon-th:before{content:"\e011"}.glyphicon-th-list:before{content:"\e012"}.glyphicon-ok:before{content:"\e013"}.glyphicon-remove:before{content:"\e014"}.glyphicon-zoom-in:before{content:"\e015"}.glyphicon-zoom-out:before{content:"\e016"}.glyphicon-off:before{content:"\e017"}.glyphicon-signal:before{content:"\e018"}.glyphicon-cog:before{content:"\e019"}.glyphicon-trash:before{content:"\e020"}.glyphicon-home:before{content:"\e021"}.glyphicon-file:before{content:"\e022"}.glyphicon-time:before{content:"\e023"}.glyphicon-road:before{content:"\e024"}.glyphicon-download-alt:before{content:"\e025"}.glyphicon-download:before{content:"\e026"}.glyphicon-upload:before{content:"\e027"}.glyphicon-inbox:before{content:"\e028"}.glyphicon-play-circle:before{content:"\e029"}.glyphicon-repeat:before{content:"\e030"}.glyphicon-refresh:before{content:"\e031"}.glyphicon-list-alt:before{content:"\e032"}.glyphicon-lock:before{content:"\e033"}.glyphicon-flag:before{content:"\e034"}.glyphicon-headphones:before{content:"\e035"}.glyphicon-volume-off:before{content:"\e036"}.glyphicon-volume-down:before{content:"\e037"}.glyphicon-volume-up:before{content:"\e038"}.glyphicon-qrcode:before{content:"\e039"}.glyphicon-barcode:before{content:"\e040"}.glyphicon-tag:before{content:"\e041"}.glyphicon-tags:before{content:"\e042"}.glyphicon-book:before{content:"\e043"}.glyphicon-bookmark:before{content:"\e044"}.glyphicon-print:before{content:"\e045"}.glyphicon-camera:before{content:"\e046"}.glyphicon-font:before{content:"\e047"}.glyphicon-bold:before{content:"\e048"}.glyphicon-italic:before{content:"\e049"}.glyphicon-text-height:before{content:"\e050"}.glyphicon-text-width:before{content:"\e051"}.glyphicon-align-left:before{content:"\e052"}.glyphicon-align-center:before{content:"\e053"}.glyphicon-align-right:before{content:"\e054"}.glyphicon-align-justify:before{content:"\e055"}.glyphicon-list:before{content:"\e056"}.glyphicon-indent-left:before{content:"\e057"}.glyphicon-indent-right:before{content:"\e058"}.glyphicon-facetime-video:before{content:"\e059"}.glyphicon-picture:before{content:"\e060"}.glyphicon-map-marker:before{content:"\e062"}.glyphicon-adjust:before{content:"\e063"}.glyphicon-tint:before{content:"\e064"}.glyphicon-edit:before{content:"\e065"}.glyphicon-share:before{content:"\e066"}.glyphicon-check:before{content:"\e067"}.glyphicon-move:before{content:"\e068"}.glyphicon-step-backward:before{content:"\e069"}.glyphicon-fast-backward:before{content:"\e070"}.glyphicon-backward:before{content:"\e071"}.glyphicon-play:before{content:"\e072"}.glyphicon-pause:before{content
:"\e073"}.glyphicon-stop:before{content:"\e074"}.glyphicon-forward:before{content:"\e075"}.glyphicon-fast-forward:before{content:"\e076"}.glyphicon-step-forward:before{content:"\e077"}.glyphicon-eject:before{content:"\e078"}.glyphicon-chevron-left:before{content:"\e079"}.glyphicon-chevron-right:before{content:"\e080"}.glyphicon-plus-sign:before{content:"\e081"}.glyphicon-minus-sign:before{content:"\e082"}.glyphicon-remove-sign:before{content:"\e083"}.glyphicon-ok-sign:before{content:"\e084"}.glyphicon-question-sign:before{content:"\e085"}.glyphicon-info-sign:before{content:"\e086"}.glyphicon-screenshot:before{content:"\e087"}.glyphicon-remove-circle:before{content:"\e088"}.glyphicon-ok-circle:before{content:"\e089"}.glyphicon-ban-circle:before{content:"\e090"}.glyphicon-arrow-left:before{content:"\e091"}.glyphicon-arrow-right:before{content:"\e092"}.glyphicon-arrow-up:before{content:"\e093"}.glyphicon-arrow-down:before{content:"\e094"}.glyphicon-share-alt:before{content:"\e095"}.glyphicon-resize-full:before{content:"\e096"}.glyphicon-resize-small:before{content:"\e097"}.glyphicon-exclamation-sign:before{content:"\e101"}.glyphicon-gift:before{content:"\e102"}.glyphicon-leaf:before{content:"\e103"}.glyphicon-fire:before{content:"\e104"}.glyphicon-eye-open:before{content:"\e105"}.glyphicon-eye-close:before{content:"\e106"}.glyphicon-warning-sign:before{content:"\e107"}.glyphicon-plane:before{content:"\e108"}.glyphicon-calendar:before{content:"\e109"}.glyphicon-random:before{content:"\e110"}.glyphicon-comment:before{content:"\e111"}.glyphicon-magnet:before{content:"\e112"}.glyphicon-chevron-up:before{content:"\e113"}.glyphicon-chevron-down:before{content:"\e114"}.glyphicon-retweet:before{content:"\e115"}.glyphicon-shopping-cart:before{content:"\e116"}.glyphicon-folder-close:before{content:"\e117"}.glyphicon-folder-open:before{content:"\e118"}.glyphicon-resize-vertical:before{content:"\e119"}.glyphicon-resize-horizontal:before{content:"\e120"}.glyphicon-hdd:before{content:"\e121"}.glyphicon-bullhorn:before{content:"\e122"}.glyphicon-bell:before{content:"\e123"}.glyphicon-certificate:before{content:"\e124"}.glyphicon-thumbs-up:before{content:"\e125"}.glyphicon-thumbs-down:before{content:"\e126"}.glyphicon-hand-right:before{content:"\e127"}.glyphicon-hand-left:before{content:"\e128"}.glyphicon-hand-up:before{content:"\e129"}.glyphicon-hand-down:before{content:"\e130"}.glyphicon-circle-arrow-right:before{content:"\e131"}.glyphicon-circle-arrow-left:before{content:"\e132"}.glyphicon-circle-arrow-up:before{content:"\e133"}.glyphicon-circle-arrow-down:before{content:"\e134"}.glyphicon-globe:before{content:"\e135"}.glyphicon-wrench:before{content:"\e136"}.glyphicon-tasks:before{content:"\e137"}.glyphicon-filter:before{content:"\e138"}.glyphicon-briefcase:before{content:"\e139"}.glyphicon-fullscreen:before{content:"\e140"}.glyphicon-dashboard:before{content:"\e141"}.glyphicon-paperclip:before{content:"\e142"}.glyphicon-heart-empty:before{content:"\e143"}.glyphicon-link:before{content:"\e144"}.glyphicon-phone:before{content:"\e145"}.glyphicon-pushpin:before{content:"\e146"}.glyphicon-usd:before{content:"\e148"}.glyphicon-gbp:before{content:"\e149"}.glyphicon-sort:before{content:"\e150"}.glyphicon-sort-by-alphabet:before{content:"\e151"}.glyphicon-sort-by-alphabet-alt:before{content:"\e152"}.glyphicon-sort-by-order:before{content:"\e153"}.glyphicon-sort-by-order-alt:before{content:"\e154"}.glyphicon-sort-by-attributes:before{content:"\e155"}.glyphicon-sort-by-attributes-alt:before{content:"\e156"}.glyphic
on-unchecked:before{content:"\e157"}.glyphicon-expand:before{content:"\e158"}.glyphicon-collapse-down:before{content:"\e159"}.glyphicon-collapse-up:before{content:"\e160"}.glyphicon-log-in:before{content:"\e161"}.glyphicon-flash:before{content:"\e162"}.glyphicon-log-out:before{content:"\e163"}.glyphicon-new-window:before{content:"\e164"}.glyphicon-record:before{content:"\e165"}.glyphicon-save:before{content:"\e166"}.glyphicon-open:before{content:"\e167"}.glyphicon-saved:before{content:"\e168"}.glyphicon-import:before{content:"\e169"}.glyphicon-export:before{content:"\e170"}.glyphicon-send:before{content:"\e171"}.glyphicon-floppy-disk:before{content:"\e172"}.glyphicon-floppy-saved:before{content:"\e173"}.glyphicon-floppy-remove:before{content:"\e174"}.glyphicon-floppy-save:before{content:"\e175"}.glyphicon-floppy-open:before{content:"\e176"}.glyphicon-credit-card:before{content:"\e177"}.glyphicon-transfer:before{content:"\e178"}.glyphicon-cutlery:before{content:"\e179"}.glyphicon-header:before{content:"\e180"}.glyphicon-compressed:before{content:"\e181"}.glyphicon-earphone:before{content:"\e182"}.glyphicon-phone-alt:before{content:"\e183"}.glyphicon-tower:before{content:"\e184"}.glyphicon-stats:before{content:"\e185"}.glyphicon-sd-video:before{content:"\e186"}.glyphicon-hd-video:before{content:"\e187"}.glyphicon-subtitles:before{content:"\e188"}.glyphicon-sound-stereo:before{content:"\e189"}.glyphicon-sound-dolby:before{content:"\e190"}.glyphicon-sound-5-1:before{content:"\e191"}.glyphicon-sound-6-1:before{content:"\e192"}.glyphicon-sound-7-1:before{content:"\e193"}.glyphicon-copyright-mark:before{content:"\e194"}.glyphicon-registration-mark:before{content:"\e195"}.glyphicon-cloud-download:before{content:"\e197"}.glyphicon-cloud-upload:before{content:"\e198"}.glyphicon-tree-conifer:before{content:"\e199"}.glyphicon-tree-deciduous:before{content:"\e200"}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}:before,:after{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:10px;-webkit-tap-highlight-color:rgba(0,0,0,0)}body{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:1.42857143;color:#333;background-color:#fff}input,button,select,textarea{font-family:inherit;font-size:inherit;line-height:inherit}a{color:#428bca;text-decoration:none}a:hover,a:focus{color:#2a6496;text-decoration:underline}a:focus{outline:thin dotted;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}figure{margin:0}img{vertical-align:middle}.img-responsive,.thumbnail>img,.thumbnail a>img,.carousel-inner>.item>img,.carousel-inner>.item>a>img{display:block;width:100% \9;max-width:100%;height:auto}.img-rounded{border-radius:6px}.img-thumbnail{display:inline-block;width:100% \9;max-width:100%;height:auto;padding:4px;line-height:1.42857143;background-color:#fff;border:1px solid #ddd;border-radius:4px;-webkit-transition:all .2s ease-in-out;-o-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.img-circle{border-radius:50%}hr{margin-top:20px;margin-bottom:20px;border:0;border-top:1px solid #eee}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{font-family:inherit;font-weight:500;line-height:1.1;color:inherit}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small,.h1 small,.h2 
small,.h3 small,.h4 small,.h5 small,.h6 small,h1 .small,h2 .small,h3 .small,h4 .small,h5 .small,h6 .small,.h1 .small,.h2 .small,.h3 .small,.h4 .small,.h5 .small,.h6 .small{font-weight:400;line-height:1;color:#777}h1,.h1,h2,.h2,h3,.h3{margin-top:20px;margin-bottom:10px}h1 small,.h1 small,h2 small,.h2 small,h3 small,.h3 small,h1 .small,.h1 .small,h2 .small,.h2 .small,h3 .small,.h3 .small{font-size:65%}h4,.h4,h5,.h5,h6,.h6{margin-top:10px;margin-bottom:10px}h4 small,.h4 small,h5 small,.h5 small,h6 small,.h6 small,h4 .small,.h4 .small,h5 .small,.h5 .small,h6 .small,.h6 .small{font-size:75%}h1,.h1{font-size:36px}h2,.h2{font-size:30px}h3,.h3{font-size:24px}h4,.h4{font-size:18px}h5,.h5{font-size:14px}h6,.h6{font-size:12px}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:16px;font-weight:300;line-height:1.4}@media (min-width:768px){.lead{font-size:21px}}small,.small{font-size:85%}cite{font-style:normal}mark,.mark{padding:.2em;background-color:#fcf8e3}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}.text-justify{text-align:justify}.text-nowrap{white-space:nowrap}.text-lowercase{text-transform:lowercase}.text-uppercase{text-transform:uppercase}.text-capitalize{text-transform:capitalize}.text-muted{color:#777}.text-primary{color:#428bca}a.text-primary:hover{color:#3071a9}.text-success{color:#3c763d}a.text-success:hover{color:#2b542c}.text-info{color:#31708f}a.text-info:hover{color:#245269}.text-warning{color:#8a6d3b}a.text-warning:hover{color:#66512c}.text-danger{color:#a94442}a.text-danger:hover{color:#843534}.bg-primary{color:#fff;background-color:#428bca}a.bg-primary:hover{background-color:#3071a9}.bg-success{background-color:#dff0d8}a.bg-success:hover{background-color:#c1e2b3}.bg-info{background-color:#d9edf7}a.bg-info:hover{background-color:#afd9ee}.bg-warning{background-color:#fcf8e3}a.bg-warning:hover{background-color:#f7ecb5}.bg-danger{background-color:#f2dede}a.bg-danger:hover{background-color:#e4b9b9}.page-header{padding-bottom:9px;margin:40px 0 20px;border-bottom:1px solid #eee}ul,ol{margin-top:0;margin-bottom:10px}ul ul,ol ul,ul ol,ol ol{margin-bottom:0}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;margin-left:-5px;list-style:none}.list-inline>li{display:inline-block;padding-right:5px;padding-left:5px}dl{margin-top:0;margin-bottom:20px}dt,dd{line-height:1.42857143}dt{font-weight:700}dd{margin-left:0}@media (min-width:768px){.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #777}.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:10px 20px;margin:0 0 20px;font-size:17.5px;border-left:5px solid #eee}blockquote p:last-child,blockquote ul:last-child,blockquote ol:last-child{margin-bottom:0}blockquote footer,blockquote small,blockquote .small{display:block;font-size:80%;line-height:1.42857143;color:#777}blockquote footer:before,blockquote small:before,blockquote .small:before{content:'\2014 \00A0'}.blockquote-reverse,blockquote.pull-right{padding-right:15px;padding-left:0;text-align:right;border-right:5px solid #eee;border-left:0}.blockquote-reverse footer:before,blockquote.pull-right footer:before,.blockquote-reverse small:before,blockquote.pull-right small:before,.blockquote-reverse .small:before,blockquote.pull-right .small:before{content:''}.blockquote-reverse footer:after,blockquote.pull-right 
footer:after,.blockquote-reverse small:after,blockquote.pull-right small:after,.blockquote-reverse .small:after,blockquote.pull-right .small:after{content:'\00A0 \2014'}blockquote:before,blockquote:after{content:""}address{margin-bottom:20px;font-style:normal;line-height:1.42857143}code,kbd,pre,samp{font-family:Menlo,Monaco,Consolas,"Courier New",monospace}code{padding:2px 4px;font-size:90%;color:#c7254e;background-color:#f9f2f4;border-radius:4px}kbd{padding:2px 4px;font-size:90%;color:#fff;background-color:#333;border-radius:3px;-webkit-box-shadow:inset 0 -1px 0 rgba(0,0,0,.25);box-shadow:inset 0 -1px 0 rgba(0,0,0,.25)}kbd kbd{padding:0;font-size:100%;-webkit-box-shadow:none;box-shadow:none}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:1.42857143;color:#333;word-break:break-all;word-wrap:break-word;background-color:#f5f5f5;border:1px solid #ccc;border-radius:4px}pre code{padding:0;font-size:inherit;color:inherit;white-space:pre-wrap;background-color:transparent;border-radius:0}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width:768px){.container{width:750px}}@media (min-width:992px){.container{width:970px}}@media (min-width:1200px){.container{width:1170px}}.container-fluid{padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{margin-right:-15px;margin-left:-15px}.col-xs-1,.col-sm-1,.col-md-1,.col-lg-1,.col-xs-2,.col-sm-2,.col-md-2,.col-lg-2,.col-xs-3,.col-sm-3,.col-md-3,.col-lg-3,.col-xs-4,.col-sm-4,.col-md-4,.col-lg-4,.col-xs-5,.col-sm-5,.col-md-5,.col-lg-5,.col-xs-6,.col-sm-6,.col-md-6,.col-lg-6,.col-xs-7,.col-sm-7,.col-md-7,.col-lg-7,.col-xs-8,.col-sm-8,.col-md-8,.col-lg-8,.col-xs-9,.col-sm-9,.col-md-9,.col-lg-9,.col-xs-10,.col-sm-10,.col-md-10,.col-lg-10,.col-xs-11,.col-sm-11,.col-md-11,.col-lg-11,.col-xs-12,.col-sm-12,.col-md-12,.col-lg-12{position:relative;min-height:1px;padding-right:15px;padding-left:15px}.col-xs-1,.col-xs-2,.col-xs-3,.col-xs-4,.col-xs-5,.col-xs-6,.col-xs-7,.col-xs-8,.col-xs-9,.col-xs-10,.col-xs-11,.col-xs-12{float:left}.col-xs-12{width:100%}.col-xs-11{width:91.66666667%}.col-xs-10{width:83.33333333%}.col-xs-9{width:75%}.col-xs-8{width:66.66666667%}.col-xs-7{width:58.33333333%}.col-xs-6{width:50%}.col-xs-5{width:41.66666667%}.col-xs-4{width:33.33333333%}.col-xs-3{width:25%}.col-xs-2{width:16.66666667%}.col-xs-1{width:8.33333333%}.col-xs-pull-12{right:100%}.col-xs-pull-11{right:91.66666667%}.col-xs-pull-10{right:83.33333333%}.col-xs-pull-9{right:75%}.col-xs-pull-8{right:66.66666667%}.col-xs-pull-7{right:58.33333333%}.col-xs-pull-6{right:50%}.col-xs-pull-5{right:41.66666667%}.col-xs-pull-4{right:33.33333333%}.col-xs-pull-3{right:25%}.col-xs-pull-2{right:16.66666667%}.col-xs-pull-1{right:8.33333333%}.col-xs-pull-0{right:auto}.col-xs-push-12{left:100%}.col-xs-push-11{left:91.66666667%}.col-xs-push-10{left:83.33333333%}.col-xs-push-9{left:75%}.col-xs-push-8{left:66.66666667%}.col-xs-push-7{left:58.33333333%}.col-xs-push-6{left:50%}.col-xs-push-5{left:41.66666667%}.col-xs-push-4{left:33.33333333%}.col-xs-push-3{left:25%}.col-xs-push-2{left:16.66666667%}.col-xs-push-1{left:8.33333333%}.col-xs-push-0{left:auto}.col-xs-offset-12{margin-left:100%}.col-xs-offset-11{margin-left:91.66666667%}.col-xs-offset-10{margin-left:83.33333333%}.col-xs-offset-9{margin-left:75%}.col-xs-offset-8{margin-left:66.66666667%}.col-xs-offset-7{margin-left:58.33333333%}.col-xs-offset-6{margin-left:50%}.col-xs-offset-5{margin-left:41.66666667%}.col-x
s-offset-4{margin-left:33.33333333%}.col-xs-offset-3{margin-left:25%}.col-xs-offset-2{margin-left:16.66666667%}.col-xs-offset-1{margin-left:8.33333333%}.col-xs-offset-0{margin-left:0}@media (min-width:768px){.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12{float:left}.col-sm-12{width:100%}.col-sm-11{width:91.66666667%}.col-sm-10{width:83.33333333%}.col-sm-9{width:75%}.col-sm-8{width:66.66666667%}.col-sm-7{width:58.33333333%}.col-sm-6{width:50%}.col-sm-5{width:41.66666667%}.col-sm-4{width:33.33333333%}.col-sm-3{width:25%}.col-sm-2{width:16.66666667%}.col-sm-1{width:8.33333333%}.col-sm-pull-12{right:100%}.col-sm-pull-11{right:91.66666667%}.col-sm-pull-10{right:83.33333333%}.col-sm-pull-9{right:75%}.col-sm-pull-8{right:66.66666667%}.col-sm-pull-7{right:58.33333333%}.col-sm-pull-6{right:50%}.col-sm-pull-5{right:41.66666667%}.col-sm-pull-4{right:33.33333333%}.col-sm-pull-3{right:25%}.col-sm-pull-2{right:16.66666667%}.col-sm-pull-1{right:8.33333333%}.col-sm-pull-0{right:auto}.col-sm-push-12{left:100%}.col-sm-push-11{left:91.66666667%}.col-sm-push-10{left:83.33333333%}.col-sm-push-9{left:75%}.col-sm-push-8{left:66.66666667%}.col-sm-push-7{left:58.33333333%}.col-sm-push-6{left:50%}.col-sm-push-5{left:41.66666667%}.col-sm-push-4{left:33.33333333%}.col-sm-push-3{left:25%}.col-sm-push-2{left:16.66666667%}.col-sm-push-1{left:8.33333333%}.col-sm-push-0{left:auto}.col-sm-offset-12{margin-left:100%}.col-sm-offset-11{margin-left:91.66666667%}.col-sm-offset-10{margin-left:83.33333333%}.col-sm-offset-9{margin-left:75%}.col-sm-offset-8{margin-left:66.66666667%}.col-sm-offset-7{margin-left:58.33333333%}.col-sm-offset-6{margin-left:50%}.col-sm-offset-5{margin-left:41.66666667%}.col-sm-offset-4{margin-left:33.33333333%}.col-sm-offset-3{margin-left:25%}.col-sm-offset-2{margin-left:16.66666667%}.col-sm-offset-1{margin-left:8.33333333%}.col-sm-offset-0{margin-left:0}}@media 
(min-width:992px){.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12{float:left}.col-md-12{width:100%}.col-md-11{width:91.66666667%}.col-md-10{width:83.33333333%}.col-md-9{width:75%}.col-md-8{width:66.66666667%}.col-md-7{width:58.33333333%}.col-md-6{width:50%}.col-md-5{width:41.66666667%}.col-md-4{width:33.33333333%}.col-md-3{width:25%}.col-md-2{width:16.66666667%}.col-md-1{width:8.33333333%}.col-md-pull-12{right:100%}.col-md-pull-11{right:91.66666667%}.col-md-pull-10{right:83.33333333%}.col-md-pull-9{right:75%}.col-md-pull-8{right:66.66666667%}.col-md-pull-7{right:58.33333333%}.col-md-pull-6{right:50%}.col-md-pull-5{right:41.66666667%}.col-md-pull-4{right:33.33333333%}.col-md-pull-3{right:25%}.col-md-pull-2{right:16.66666667%}.col-md-pull-1{right:8.33333333%}.col-md-pull-0{right:auto}.col-md-push-12{left:100%}.col-md-push-11{left:91.66666667%}.col-md-push-10{left:83.33333333%}.col-md-push-9{left:75%}.col-md-push-8{left:66.66666667%}.col-md-push-7{left:58.33333333%}.col-md-push-6{left:50%}.col-md-push-5{left:41.66666667%}.col-md-push-4{left:33.33333333%}.col-md-push-3{left:25%}.col-md-push-2{left:16.66666667%}.col-md-push-1{left:8.33333333%}.col-md-push-0{left:auto}.col-md-offset-12{margin-left:100%}.col-md-offset-11{margin-left:91.66666667%}.col-md-offset-10{margin-left:83.33333333%}.col-md-offset-9{margin-left:75%}.col-md-offset-8{margin-left:66.66666667%}.col-md-offset-7{margin-left:58.33333333%}.col-md-offset-6{margin-left:50%}.col-md-offset-5{margin-left:41.66666667%}.col-md-offset-4{margin-left:33.33333333%}.col-md-offset-3{margin-left:25%}.col-md-offset-2{margin-left:16.66666667%}.col-md-offset-1{margin-left:8.33333333%}.col-md-offset-0{margin-left:0}}@media (min-width:1200px){.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12{float:left}.col-lg-12{width:100%}.col-lg-11{width:91.66666667%}.col-lg-10{width:83.33333333%}.col-lg-9{width:75%}.col-lg-8{width:66.66666667%}.col-lg-7{width:58.33333333%}.col-lg-6{width:50%}.col-lg-5{width:41.66666667%}.col-lg-4{width:33.33333333%}.col-lg-3{width:25%}.col-lg-2{width:16.66666667%}.col-lg-1{width:8.33333333%}.col-lg-pull-12{right:100%}.col-lg-pull-11{right:91.66666667%}.col-lg-pull-10{right:83.33333333%}.col-lg-pull-9{right:75%}.col-lg-pull-8{right:66.66666667%}.col-lg-pull-7{right:58.33333333%}.col-lg-pull-6{right:50%}.col-lg-pull-5{right:41.66666667%}.col-lg-pull-4{right:33.33333333%}.col-lg-pull-3{right:25%}.col-lg-pull-2{right:16.66666667%}.col-lg-pull-1{right:8.33333333%}.col-lg-pull-0{right:auto}.col-lg-push-12{left:100%}.col-lg-push-11{left:91.66666667%}.col-lg-push-10{left:83.33333333%}.col-lg-push-9{left:75%}.col-lg-push-8{left:66.66666667%}.col-lg-push-7{left:58.33333333%}.col-lg-push-6{left:50%}.col-lg-push-5{left:41.66666667%}.col-lg-push-4{left:33.33333333%}.col-lg-push-3{left:25%}.col-lg-push-2{left:16.66666667%}.col-lg-push-1{left:8.33333333%}.col-lg-push-0{left:auto}.col-lg-offset-12{margin-left:100%}.col-lg-offset-11{margin-left:91.66666667%}.col-lg-offset-10{margin-left:83.33333333%}.col-lg-offset-9{margin-left:75%}.col-lg-offset-8{margin-left:66.66666667%}.col-lg-offset-7{margin-left:58.33333333%}.col-lg-offset-6{margin-left:50%}.col-lg-offset-5{margin-left:41.66666667%}.col-lg-offset-4{margin-left:33.33333333%}.col-lg-offset-3{margin-left:25%}.col-lg-offset-2{margin-left:16.66666667%}.col-lg-offset-1{margin-left:8.33333333%}.col-lg-offset-0{margin-left:0}}table{background-color:transp
rdered>tfoot>tr:last-child>td,.panel>.table-bordered>tbody>tr:last-child>th,.panel>.table-responsive>.table-bordered>tbody>tr:last-child>th,.panel>.table-bordered>tfoot>tr:last-child>th,.panel>.table-responsive>.table-bordered>tfoot>tr:last-child>th{border-bottom:0}.panel>.table-responsive{margin-bottom:0;border:0}.panel-group{margin-bottom:20px}.panel-group .panel{margin-bottom:0;border-radius:4px}.panel-group .panel+.panel{margin-top:5px}.panel-group .panel-heading{border-bottom:0}.panel-group .panel-heading+.panel-collapse>.panel-body{border-top:1px solid #ddd}.panel-group .panel-footer{border-top:0}.panel-group .panel-footer+.panel-collapse .panel-body{border-bottom:1px solid #ddd}.panel-default{border-color:#ddd}.panel-default>.panel-heading{color:#333;background-color:#f5f5f5;border-color:#ddd}.panel-default>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ddd}.panel-default>.panel-heading .badge{color:#f5f5f5;background-color:#333}.panel-default>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ddd}.panel-primary{border-color:#428bca}.panel-primary>.panel-heading{color:#fff;background-color:#428bca;border-color:#428bca}.panel-primary>.panel-heading+.panel-collapse>.panel-body{border-top-color:#428bca}.panel-primary>.panel-heading .badge{color:#428bca;background-color:#fff}.panel-primary>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#428bca}.panel-success{border-color:#d6e9c6}.panel-success>.panel-heading{color:#3c763d;background-color:#dff0d8;border-color:#d6e9c6}.panel-success>.panel-heading+.panel-collapse>.panel-body{border-top-color:#d6e9c6}.panel-success>.panel-heading .badge{color:#dff0d8;background-color:#3c763d}.panel-success>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#d6e9c6}.panel-info{border-color:#bce8f1}.panel-info>.panel-heading{color:#31708f;background-color:#d9edf7;border-color:#bce8f1}.panel-info>.panel-heading+.panel-collapse>.panel-body{border-top-color:#bce8f1}.panel-info>.panel-heading .badge{color:#d9edf7;background-color:#31708f}.panel-info>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#bce8f1}.panel-warning{border-color:#faebcc}.panel-warning>.panel-heading{color:#8a6d3b;background-color:#fcf8e3;border-color:#faebcc}.panel-warning>.panel-heading+.panel-collapse>.panel-body{border-top-color:#faebcc}.panel-warning>.panel-heading .badge{color:#fcf8e3;background-color:#8a6d3b}.panel-warning>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#faebcc}.panel-danger{border-color:#ebccd1}.panel-danger>.panel-heading{color:#a94442;background-color:#f2dede;border-color:#ebccd1}.panel-danger>.panel-heading+.panel-collapse>.panel-body{border-top-color:#ebccd1}.panel-danger>.panel-heading .badge{color:#f2dede;background-color:#a94442}.panel-danger>.panel-footer+.panel-collapse>.panel-body{border-bottom-color:#ebccd1}.embed-responsive{position:relative;display:block;height:0;padding:0;overflow:hidden}.embed-responsive .embed-responsive-item,.embed-responsive iframe,.embed-responsive embed,.embed-responsive object{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive.embed-responsive-16by9{padding-bottom:56.25%}.embed-responsive.embed-responsive-4by3{padding-bottom:75%}.well{min-height:20px;padding:19px;margin-bottom:20px;background-color:#f5f5f5;border:1px solid #e3e3e3;border-radius:4px;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,.05);box-shadow:inset 0 1px 1px rgba(0,0,0,.05)}.well 
blockquote{border-color:#ddd;border-color:rgba(0,0,0,.15)}.well-lg{padding:24px;border-radius:6px}.well-sm{padding:9px;border-radius:3px}.close{float:right;font-size:21px;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;filter:alpha(opacity=20);opacity:.2}.close:hover,.close:focus{color:#000;text-decoration:none;cursor:pointer;filter:alpha(opacity=50);opacity:.5}button.close{-webkit-appearance:none;padding:0;cursor:pointer;background:0 0;border:0}.modal-open{overflow:hidden}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;-webkit-overflow-scrolling:touch;outline:0}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform .3s ease-out;-o-transition:-o-transform .3s ease-out;transition:transform .3s ease-out;-webkit-transform:translate3d(0,-25%,0);-o-transform:translate3d(0,-25%,0);transform:translate3d(0,-25%,0)}.modal.in .modal-dialog{-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal-dialog{position:relative;width:auto;margin:10px}.modal-content{position:relative;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #999;border:1px solid rgba(0,0,0,.2);border-radius:6px;outline:0;-webkit-box-shadow:0 3px 9px rgba(0,0,0,.5);box-shadow:0 3px 9px rgba(0,0,0,.5)}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{filter:alpha(opacity=0);opacity:0}.modal-backdrop.in{filter:alpha(opacity=50);opacity:.5}.modal-header{min-height:16.43px;padding:15px;border-bottom:1px solid #e5e5e5}.modal-header .close{margin-top:-2px}.modal-title{margin:0;line-height:1.42857143}.modal-body{position:relative;padding:15px}.modal-footer{padding:15px;text-align:right;border-top:1px solid #e5e5e5}.modal-footer .btn+.btn{margin-bottom:0;margin-left:5px}.modal-footer .btn-group .btn+.btn{margin-left:-1px}.modal-footer .btn-block+.btn-block{margin-left:0}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width:768px){.modal-dialog{width:600px;margin:30px auto}.modal-content{-webkit-box-shadow:0 5px 15px rgba(0,0,0,.5);box-shadow:0 5px 15px rgba(0,0,0,.5)}.modal-sm{width:300px}}@media (min-width:992px){.modal-lg{width:900px}}.tooltip{position:absolute;z-index:1070;display:block;font-size:12px;line-height:1.4;visibility:visible;filter:alpha(opacity=0);opacity:0}.tooltip.in{filter:alpha(opacity=90);opacity:.9}.tooltip.top{padding:5px 0;margin-top:-3px}.tooltip.right{padding:0 5px;margin-left:3px}.tooltip.bottom{padding:5px 0;margin-top:3px}.tooltip.left{padding:0 5px;margin-left:-3px}.tooltip-inner{max-width:200px;padding:3px 8px;color:#fff;text-align:center;text-decoration:none;background-color:#000;border-radius:4px}.tooltip-arrow{position:absolute;width:0;height:0;border-color:transparent;border-style:solid}.tooltip.top .tooltip-arrow{bottom:0;left:50%;margin-left:-5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-left .tooltip-arrow{bottom:0;left:5px;border-width:5px 5px 0;border-top-color:#000}.tooltip.top-right .tooltip-arrow{right:5px;bottom:0;border-width:5px 5px 0;border-top-color:#000}.tooltip.right .tooltip-arrow{top:50%;left:0;margin-top:-5px;border-width:5px 5px 5px 0;border-right-color:#000}.tooltip.left .tooltip-arrow{top:50%;right:0;margin-top:-5px;border-width:5px 0 5px 5px;border-left-color:#000}.tooltip.bottom 
.tooltip-arrow{top:0;left:50%;margin-left:-5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-left .tooltip-arrow{top:0;left:5px;border-width:0 5px 5px;border-bottom-color:#000}.tooltip.bottom-right .tooltip-arrow{top:0;right:5px;border-width:0 5px 5px;border-bottom-color:#000}.popover{position:absolute;top:0;left:0;z-index:1060;display:none;max-width:276px;padding:1px;text-align:left;white-space:normal;background-color:#fff;-webkit-background-clip:padding-box;background-clip:padding-box;border:1px solid #ccc;border:1px solid rgba(0,0,0,.2);border-radius:6px;-webkit-box-shadow:0 5px 10px rgba(0,0,0,.2);box-shadow:0 5px 10px rgba(0,0,0,.2)}.popover.top{margin-top:-10px}.popover.right{margin-left:10px}.popover.bottom{margin-top:10px}.popover.left{margin-left:-10px}.popover-title{padding:8px 14px;margin:0;font-size:14px;font-weight:400;line-height:18px;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-radius:5px 5px 0 0}.popover-content{padding:9px 14px}.popover>.arrow,.popover>.arrow:after{position:absolute;display:block;width:0;height:0;border-color:transparent;border-style:solid}.popover>.arrow{border-width:11px}.popover>.arrow:after{content:"";border-width:10px}.popover.top>.arrow{bottom:-11px;left:50%;margin-left:-11px;border-top-color:#999;border-top-color:rgba(0,0,0,.25);border-bottom-width:0}.popover.top>.arrow:after{bottom:1px;margin-left:-10px;content:" ";border-top-color:#fff;border-bottom-width:0}.popover.right>.arrow{top:50%;left:-11px;margin-top:-11px;border-right-color:#999;border-right-color:rgba(0,0,0,.25);border-left-width:0}.popover.right>.arrow:after{bottom:-10px;left:1px;content:" ";border-right-color:#fff;border-left-width:0}.popover.bottom>.arrow{top:-11px;left:50%;margin-left:-11px;border-top-width:0;border-bottom-color:#999;border-bottom-color:rgba(0,0,0,.25)}.popover.bottom>.arrow:after{top:1px;margin-left:-10px;content:" ";border-top-width:0;border-bottom-color:#fff}.popover.left>.arrow{top:50%;right:-11px;margin-top:-11px;border-right-width:0;border-left-color:#999;border-left-color:rgba(0,0,0,.25)}.popover.left>.arrow:after{right:1px;bottom:-10px;content:" ";border-right-width:0;border-left-color:#fff}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-inner>.item{position:relative;display:none;-webkit-transition:.6s ease-in-out left;-o-transition:.6s ease-in-out left;transition:.6s ease-in-out left}.carousel-inner>.item>img,.carousel-inner>.item>a>img{line-height:1}.carousel-inner>.active,.carousel-inner>.next,.carousel-inner>.prev{display:block}.carousel-inner>.active{left:0}.carousel-inner>.next,.carousel-inner>.prev{position:absolute;top:0;width:100%}.carousel-inner>.next{left:100%}.carousel-inner>.prev{left:-100%}.carousel-inner>.next.left,.carousel-inner>.prev.right{left:0}.carousel-inner>.active.left{left:-100%}.carousel-inner>.active.right{left:100%}.carousel-control{position:absolute;top:0;bottom:0;left:0;width:15%;font-size:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6);filter:alpha(opacity=50);opacity:.5}.carousel-control.left{background-image:-webkit-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.5)),to(rgba(0,0,0,.0001)));background-image:linear-gradient(to right,rgba(0,0,0,.5) 0,rgba(0,0,0,.0001) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#80000000', 
endColorstr='#00000000', GradientType=1);background-repeat:repeat-x}.carousel-control.right{right:0;left:auto;background-image:-webkit-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-o-linear-gradient(left,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);background-image:-webkit-gradient(linear,left top,right top,from(rgba(0,0,0,.0001)),to(rgba(0,0,0,.5)));background-image:linear-gradient(to right,rgba(0,0,0,.0001) 0,rgba(0,0,0,.5) 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#00000000', endColorstr='#80000000', GradientType=1);background-repeat:repeat-x}.carousel-control:hover,.carousel-control:focus{color:#fff;text-decoration:none;filter:alpha(opacity=90);outline:0;opacity:.9}.carousel-control .icon-prev,.carousel-control .icon-next,.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right{position:absolute;top:50%;z-index:5;display:inline-block}.carousel-control .icon-prev,.carousel-control .glyphicon-chevron-left{left:50%;margin-left:-10px}.carousel-control .icon-next,.carousel-control .glyphicon-chevron-right{right:50%;margin-right:-10px}.carousel-control .icon-prev,.carousel-control .icon-next{width:20px;height:20px;margin-top:-10px;font-family:serif}.carousel-control .icon-prev:before{content:'\2039'}.carousel-control .icon-next:before{content:'\203a'}.carousel-indicators{position:absolute;bottom:10px;left:50%;z-index:15;width:60%;padding-left:0;margin-left:-30%;text-align:center;list-style:none}.carousel-indicators li{display:inline-block;width:10px;height:10px;margin:1px;text-indent:-999px;cursor:pointer;background-color:#000 \9;background-color:rgba(0,0,0,0);border:1px solid #fff;border-radius:10px}.carousel-indicators .active{width:12px;height:12px;margin:0;background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center;text-shadow:0 1px 2px rgba(0,0,0,.6)}.carousel-caption .btn{text-shadow:none}@media screen and (min-width:768px){.carousel-control .glyphicon-chevron-left,.carousel-control .glyphicon-chevron-right,.carousel-control .icon-prev,.carousel-control .icon-next{width:30px;height:30px;margin-top:-15px;font-size:30px}.carousel-control .glyphicon-chevron-left,.carousel-control .icon-prev{margin-left:-15px}.carousel-control .glyphicon-chevron-right,.carousel-control .icon-next{margin-right:-15px}.carousel-caption{right:20%;left:20%;padding-bottom:30px}.carousel-indicators{bottom:20px}}.clearfix:before,.clearfix:after,.dl-horizontal dd:before,.dl-horizontal dd:after,.container:before,.container:after,.container-fluid:before,.container-fluid:after,.row:before,.row:after,.form-horizontal .form-group:before,.form-horizontal .form-group:after,.btn-toolbar:before,.btn-toolbar:after,.btn-group-vertical>.btn-group:before,.btn-group-vertical>.btn-group:after,.nav:before,.nav:after,.navbar:before,.navbar:after,.navbar-header:before,.navbar-header:after,.navbar-collapse:before,.navbar-collapse:after,.pager:before,.pager:after,.panel-body:before,.panel-body:after,.modal-footer:before,.modal-footer:after{display:table;content:" "}.clearfix:after,.dl-horizontal dd:after,.container:after,.container-fluid:after,.row:after,.form-horizontal 
.form-group:after,.btn-toolbar:after,.btn-group-vertical>.btn-group:after,.nav:after,.navbar:after,.navbar-header:after,.navbar-collapse:after,.pager:after,.panel-body:after,.modal-footer:after{clear:both}.center-block{display:block;margin-right:auto;margin-left:auto}.pull-right{float:right!important}.pull-left{float:left!important}.hide{display:none!important}.show{display:block!important}.invisible{visibility:hidden}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.hidden{display:none!important;visibility:hidden!important}.affix{position:fixed;-webkit-transform:translate3d(0,0,0);-o-transform:translate3d(0,0,0);transform:translate3d(0,0,0)}@-ms-viewport{width:device-width}.visible-xs,.visible-sm,.visible-md,.visible-lg{display:none!important}.visible-xs-block,.visible-xs-inline,.visible-xs-inline-block,.visible-sm-block,.visible-sm-inline,.visible-sm-inline-block,.visible-md-block,.visible-md-inline,.visible-md-inline-block,.visible-lg-block,.visible-lg-inline,.visible-lg-inline-block{display:none!important}@media (max-width:767px){.visible-xs{display:block!important}table.visible-xs{display:table}tr.visible-xs{display:table-row!important}th.visible-xs,td.visible-xs{display:table-cell!important}}@media (max-width:767px){.visible-xs-block{display:block!important}}@media (max-width:767px){.visible-xs-inline{display:inline!important}}@media (max-width:767px){.visible-xs-inline-block{display:inline-block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm{display:block!important}table.visible-sm{display:table}tr.visible-sm{display:table-row!important}th.visible-sm,td.visible-sm{display:table-cell!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-block{display:block!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline{display:inline!important}}@media (min-width:768px) and (max-width:991px){.visible-sm-inline-block{display:inline-block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md{display:block!important}table.visible-md{display:table}tr.visible-md{display:table-row!important}th.visible-md,td.visible-md{display:table-cell!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-block{display:block!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline{display:inline!important}}@media (min-width:992px) and (max-width:1199px){.visible-md-inline-block{display:inline-block!important}}@media (min-width:1200px){.visible-lg{display:block!important}table.visible-lg{display:table}tr.visible-lg{display:table-row!important}th.visible-lg,td.visible-lg{display:table-cell!important}}@media (min-width:1200px){.visible-lg-block{display:block!important}}@media (min-width:1200px){.visible-lg-inline{display:inline!important}}@media (min-width:1200px){.visible-lg-inline-block{display:inline-block!important}}@media (max-width:767px){.hidden-xs{display:none!important}}@media (min-width:768px) and (max-width:991px){.hidden-sm{display:none!important}}@media (min-width:992px) and (max-width:1199px){.hidden-md{display:none!important}}@media (min-width:1200px){.hidden-lg{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:block!important}table.visible-print{display:table}tr.visible-print{display:table-row!important}th.visible-print,td.visible-print{display:table-cell!important}}.visible-print-block{display:none!important}@media 
print{.visible-print-block{display:block!important}}.visible-print-inline{display:none!important}@media print{.visible-print-inline{display:inline!important}}.visible-print-inline-block{display:none!important}@media print{.visible-print-inline-block{display:inline-block!important}}@media print{.hidden-print{display:none!important}} +diff --git a/doc/examples/demuxing_decoding.c b/doc/examples/demuxing_decoding.c +index b95f7682ac..db5e0cb951 100644 +--- a/doc/examples/demuxing_decoding.c ++++ b/doc/examples/demuxing_decoding.c +@@ -137,9 +137,11 @@ static int decode_packet(AVCodecContext *dec, const AVPacket *pkt) + ret = output_audio_frame(frame); + + av_frame_unref(frame); ++ if (ret < 0) ++ return ret; + } + +- return ret; ++ return 0; + } + + static int open_codec_context(int *stream_idx, +diff --git a/doc/examples/muxing.c b/doc/examples/muxing.c +index c98f749163..42f704c258 100644 +--- a/doc/examples/muxing.c ++++ b/doc/examples/muxing.c +@@ -350,7 +350,8 @@ static int write_audio_frame(AVFormatContext *oc, OutputStream *ost) + if (frame) { + /* convert samples from native format to destination codec format, using the resampler */ + /* compute destination number of samples */ +- dst_nb_samples = swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples; ++ dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples, ++ c->sample_rate, c->sample_rate, AV_ROUND_UP); + av_assert0(dst_nb_samples == frame->nb_samples); + + /* when we pass a frame to the encoder, it may keep a reference to it +diff --git a/doc/examples/vaapi_encode.c b/doc/examples/vaapi_encode.c +index 07b20a7ec4..46bca1b3fe 100644 +--- a/doc/examples/vaapi_encode.c ++++ b/doc/examples/vaapi_encode.c +@@ -91,10 +91,6 @@ static int encode_write(AVCodecContext *avctx, AVFrame *frame, FILE *fout) + enc_pkt->stream_index = 0; + ret = fwrite(enc_pkt->data, enc_pkt->size, 1, fout); + av_packet_unref(enc_pkt); +- if (ret != enc_pkt->size) { +- ret = AVERROR(errno); +- break; +- } + } + + end: +diff --git a/doc/examples/vaapi_transcode.c b/doc/examples/vaapi_transcode.c +index 40a6d58aad..5a1a704a8e 100644 +--- a/doc/examples/vaapi_transcode.c ++++ b/doc/examples/vaapi_transcode.c +@@ -218,8 +218,10 @@ static int dec_enc(AVPacket *pkt, AVCodec *enc_codec) + + fail: + av_frame_free(&frame); ++ if (ret < 0) ++ return ret; + } +- return ret; ++ return 0; + } + + int main(int argc, char **argv) diff --git a/doc/git-howto.texi b/doc/git-howto.texi -index a6723931ce..2b4fb80233 100644 +index a6723931ce..bd26fcb259 100644 --- a/doc/git-howto.texi +++ b/doc/git-howto.texi @@ -53,7 +53,7 @@ Most distribution and operating system provide a package for it. @@ -641,51 +808,252 @@ index a6723931ce..2b4fb80233 100644 @end example This will put the FFmpeg sources into the directory @var{}. -@@ -187,18 +187,11 @@ to make sure you don't have untracked files or deletions. - git add [-i|-p|-A] - @end example +diff --git a/doc/t2h.pm b/doc/t2h.pm +index b7485e1f1e..e83d564a65 100644 +--- a/doc/t2h.pm ++++ b/doc/t2h.pm +@@ -20,45 +20,8 @@ + # License along with FFmpeg; if not, write to the Free Software + # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA --Make sure you have told Git your name, email address and GPG key -+Make sure you have told Git your name and email address +-# Texinfo 7.0 changed the syntax of various functions. +-# Provide a shim for older versions. 
+-sub ff_set_from_init_file($$) { +- my $key = shift; +- my $value = shift; +- if (exists &{'texinfo_set_from_init_file'}) { +- texinfo_set_from_init_file($key, $value); +- } else { +- set_from_init_file($key, $value); +- } +-} +- +-sub ff_get_conf($) { +- my $key = shift; +- if (exists &{'texinfo_get_conf'}) { +- texinfo_get_conf($key); +- } else { +- get_conf($key); +- } +-} +- +-sub get_formatting_function($$) { +- my $obj = shift; +- my $func = shift; +- +- my $sub = $obj->can('formatting_function'); +- if ($sub) { +- return $obj->formatting_function($func); +- } else { +- return $obj->{$func}; +- } +-} +- +-# determine texinfo version +-my $program_version_num = version->declare(ff_get_conf('PACKAGE_VERSION'))->numify; +-my $program_version_6_8 = $program_version_num >= 6.008000; +- + # no navigation elements +-ff_set_from_init_file('HEADERS', 0); ++set_from_init_file('HEADERS', 0); - @example - git config --global user.name "My Name" - git config --global user.email my@@email.invalid --git config --global user.signingkey ABCDEF0123245 --@end example -- --Enable signing all commits or use -S -- --@example --git config --global commit.gpgsign true - @end example + sub ffmpeg_heading_command($$$$$) + { +@@ -92,7 +55,7 @@ sub ffmpeg_heading_command($$$$$) + $element = $command->{'parent'}; + } + if ($element) { +- $result .= &{get_formatting_function($self, 'format_element_header')}($self, $cmdname, ++ $result .= &{$self->{'format_element_header'}}($self, $cmdname, + $command, $element); + } - Use @option{--global} to set the global configuration for all your Git checkouts. -@@ -400,19 +393,6 @@ git checkout -b svn_23456 $SHA1 - where @var{$SHA1} is the commit hash from the @command{git log} output. +@@ -149,11 +112,7 @@ sub ffmpeg_heading_command($$$$$) + $cmdname + = $Texinfo::Common::level_to_structuring_command{$cmdname}->[$heading_level]; + } +- # format_heading_text expects an array of headings for texinfo >= 7.0 +- if ($program_version_num >= 7.000000) { +- $heading = [$heading]; +- } +- $result .= &{get_formatting_function($self,'format_heading_text')}( ++ $result .= &{$self->{'format_heading_text'}}( + $self, $cmdname, $heading, + $heading_level + + $self->get_conf('CHAPTER_HEADER_LEVEL') - 1, $command); +@@ -168,18 +127,14 @@ foreach my $command (keys(%Texinfo::Common::sectioning_commands), 'node') { + } + # print the TOC where @contents is used +-if ($program_version_6_8) { +- ff_set_from_init_file('CONTENTS_OUTPUT_LOCATION', 'inline'); +-} else { +- ff_set_from_init_file('INLINE_CONTENTS', 1); +-} ++set_from_init_file('INLINE_CONTENTS', 1); --@chapter gpg key generation -- --If you have no gpg key yet, we recommend that you create a ed25519 based key as it --is small, fast and secure. Especially it results in small signatures in git. -- --@example --gpg --default-new-key-algo "ed25519/cert,sign+cv25519/encr" --quick-generate-key "human@@server.com" --@end example -- --When generating a key, make sure the email specified matches the email used in git as some sites like --github consider mismatches a reason to declare such commits unverified. After generating a key you --can add it to the MAINTAINER file and upload it to a keyserver. -- - @chapter Pre-push checklist + # make chapters

+-ff_set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
++set_from_init_file('CHAPTER_HEADER_LEVEL', 2);
+ 
+ # Do not add <hr>
+-ff_set_from_init_file('DEFAULT_RULE', ''); +-ff_set_from_init_file('BIG_RULE', ''); ++set_from_init_file('DEFAULT_RULE', ''); ++set_from_init_file('BIG_RULE', ''); + + # Customized file beginning + sub ffmpeg_begin_file($$$) +@@ -196,18 +151,7 @@ sub ffmpeg_begin_file($$$) + my ($title, $description, $encoding, $date, $css_lines, + $doctype, $bodytext, $copying_comment, $after_body_open, + $extra_head, $program_and_version, $program_homepage, +- $program, $generator); +- if ($program_version_num >= 7.000000) { +- ($title, $description, $encoding, $date, $css_lines, +- $doctype, $bodytext, $copying_comment, $after_body_open, +- $extra_head, $program_and_version, $program_homepage, +- $program, $generator) = $self->_file_header_information($command); +- } else { +- ($title, $description, $encoding, $date, $css_lines, +- $doctype, $bodytext, $copying_comment, $after_body_open, +- $extra_head, $program_and_version, $program_homepage, +- $program, $generator) = $self->_file_header_informations($command); +- } ++ $program, $generator) = $self->_file_header_informations($command); + + my $links = $self->_get_links ($filename, $element); + +@@ -240,11 +184,7 @@ EOT + + return $head1 . $head_title . $head2 . $head_title . $head3; + } +-if ($program_version_6_8) { +- texinfo_register_formatting_function('format_begin_file', \&ffmpeg_begin_file); +-} else { +- texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file); +-} ++texinfo_register_formatting_function('begin_file', \&ffmpeg_begin_file); + + sub ffmpeg_program_string($) + { +@@ -261,17 +201,13 @@ sub ffmpeg_program_string($) + $self->gdt('This document was generated automatically.')); + } + } +-if ($program_version_6_8) { +- texinfo_register_formatting_function('format_program_string', \&ffmpeg_program_string); +-} else { +- texinfo_register_formatting_function('program_string', \&ffmpeg_program_string); +-} ++texinfo_register_formatting_function('program_string', \&ffmpeg_program_string); + + # Customized file ending + sub ffmpeg_end_file($) + { + my $self = shift; +- my $program_string = &{get_formatting_function($self,'format_program_string')}($self); ++ my $program_string = &{$self->{'format_program_string'}}($self); + my $program_text = < + $program_string +@@ -284,15 +220,11 @@ EOT + EOT + return $program_text . $footer; + } +-if ($program_version_6_8) { +- texinfo_register_formatting_function('format_end_file', \&ffmpeg_end_file); +-} else { +- texinfo_register_formatting_function('end_file', \&ffmpeg_end_file); +-} ++texinfo_register_formatting_function('end_file', \&ffmpeg_end_file); + + # Dummy title command + # Ignore title. Title is handled through ffmpeg_begin_file(). 
+-ff_set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1); ++set_from_init_file('USE_TITLEPAGE_FOR_TITLE', 1); + sub ffmpeg_title($$$$) + { + return ''; +@@ -310,14 +242,8 @@ sub ffmpeg_float($$$$$) + my $args = shift; + my $content = shift; + +- my ($caption, $prepended); +- if ($program_version_num >= 7.000000) { +- ($caption, $prepended) = Texinfo::Convert::Converter::float_name_caption($self, +- $command); +- } else { +- ($caption, $prepended) = Texinfo::Common::float_name_caption($self, +- $command); +- } ++ my ($caption, $prepended) = Texinfo::Common::float_name_caption($self, ++ $command); + my $caption_text = ''; + my $prepended_text; + my $prepended_save = ''; +@@ -389,13 +315,8 @@ sub ffmpeg_float($$$$$) + $caption->{'args'}->[0], 'float caption'); + } + if ($prepended_text.$caption_text ne '') { +- if ($program_version_num >= 7.000000) { +- $prepended_text = $self->html_attribute_class('div',['float-caption']). '>' +- . $prepended_text; +- } else { +- $prepended_text = $self->_attribute_class('div','float-caption'). '>' +- . $prepended_text; +- } ++ $prepended_text = $self->_attribute_class('div','float-caption'). '>' ++ . $prepended_text; + $caption_text .= ''; + } + my $html_class = ''; +@@ -408,13 +329,8 @@ sub ffmpeg_float($$$$$) + $prepended_text = ''; + $caption_text = ''; + } +- if ($program_version_num >= 7.000000) { +- return $self->html_attribute_class('div', [$html_class]). '>' . "\n" . +- $prepended_text . $caption_text . $content . ''; +- } else { +- return $self->_attribute_class('div', $html_class). '>' . "\n" . +- $prepended_text . $caption_text . $content . ''; +- } ++ return $self->_attribute_class('div', $html_class). '>' . "\n" . ++ $prepended_text . $caption_text . $content . ''; + } + + texinfo_register_command_formatting('float', +diff --git a/fftools/cmdutils.c b/fftools/cmdutils.c +index a647651b1f..fe424b6a4c 100644 +--- a/fftools/cmdutils.c ++++ b/fftools/cmdutils.c +@@ -537,7 +537,7 @@ static const AVOption *opt_find(void *obj, const char *name, const char *unit, + return o; + } + +-#define FLAGS ((o->type == AV_OPT_TYPE_FLAGS && (arg[0]=='-' || arg[0]=='+')) ? AV_DICT_APPEND : 0) ++#define FLAGS (o->type == AV_OPT_TYPE_FLAGS && (arg[0]=='-' || arg[0]=='+')) ? 
AV_DICT_APPEND : 0 + int opt_default(void *optctx, const char *opt, const char *arg) + { + const AVOption *o; diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c -index dec012a299..8aa13007f9 100644 +index b5cc3fa023..8aa13007f9 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c -@@ -2189,8 +2189,8 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame) +@@ -492,9 +492,8 @@ static int read_key(void) + } + //Read it + if(nchars != 0) { +- if (read(0, &ch, 1) == 1) +- return ch; +- return 0; ++ read(0, &ch, 1); ++ return ch; + }else{ + return -1; + } +@@ -2190,8 +2189,8 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame) ifilter->channel_layout != frame->channel_layout; break; case AVMEDIA_TYPE_VIDEO: @@ -696,7 +1064,7 @@ index dec012a299..8aa13007f9 100644 break; } -@@ -2201,6 +2201,9 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame) +@@ -2202,6 +2201,9 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame) (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data)) need_reinit = 1; @@ -706,7 +1074,7 @@ index dec012a299..8aa13007f9 100644 if (need_reinit) { ret = ifilter_parameters_from_frame(ifilter, frame); if (ret < 0) -@@ -2469,8 +2472,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ +@@ -2470,8 +2472,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ decoded_frame->top_field_first = ist->top_field_first; ist->frames_decoded++; @@ -716,7 +1084,7 @@ index dec012a299..8aa13007f9 100644 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame); if (err < 0) goto fail; -@@ -2674,7 +2676,12 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo +@@ -2675,7 +2676,12 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo case AVMEDIA_TYPE_VIDEO: ret = decode_video (ist, repeating ? 
NULL : avpkt, &got_output, &duration_pts, !pkt, &decode_failed); @@ -730,7 +1098,7 @@ index dec012a299..8aa13007f9 100644 if (pkt && pkt->duration) { duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q); } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) { -@@ -2898,6 +2905,16 @@ static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat +@@ -2899,6 +2905,16 @@ static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat } else { const HWAccel *hwaccel = NULL; int i; @@ -747,7 +1115,7 @@ index dec012a299..8aa13007f9 100644 for (i = 0; hwaccels[i].name; i++) { if (hwaccels[i].pix_fmt == *p) { hwaccel = &hwaccels[i]; -@@ -2993,6 +3010,15 @@ static int init_input_stream(int ist_index, char *error, int error_len) +@@ -2994,6 +3010,15 @@ static int init_input_stream(int ist_index, char *error, int error_len) return ret; } @@ -870,22 +1238,21 @@ index 41d9e2708e..b5a4066656 100644 } line_end += stride; -diff --git a/libavcodec/8bps.c b/libavcodec/8bps.c -index 6cc9a0c9ae..53e939d35d 100644 ---- a/libavcodec/8bps.c -+++ b/libavcodec/8bps.c -@@ -70,9 +70,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, - unsigned char *planemap = c->planemap; - int ret; - -- if (buf_size < planes * height *2) -- return AVERROR_INVALIDDATA; -- - if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) - return ret; +diff --git a/libavcodec/4xm.c b/libavcodec/4xm.c +index dcb21a4ac2..cbd8637763 100644 +--- a/libavcodec/4xm.c ++++ b/libavcodec/4xm.c +@@ -886,8 +886,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, + } + if (i >= CFRAME_BUFFER_COUNT) { +- if (free_index < 0) +- return AVERROR_INVALIDDATA; + i = free_index; + f->cfrm[i].id = id; + } diff --git a/libavcodec/Makefile b/libavcodec/Makefile -index b3d284d7d0..e93c842047 100644 +index b3d284d7d0..7a3773b9e3 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -19,6 +19,7 @@ HEADERS = ac3_parser.h \ @@ -896,15 +1263,7 @@ index b3d284d7d0..e93c842047 100644 vaapi.h \ vdpau.h \ version.h \ -@@ -132,6 +133,7 @@ OBJS-$(CONFIG_MPEGVIDEOENC) += mpegvideo_enc.o mpeg12data.o \ - motion_est.o ratecontrol.o \ - mpegvideoencdsp.o - OBJS-$(CONFIG_MSS34DSP) += mss34dsp.o -+OBJS-$(CONFIG_NVENC) += nvenc.o - OBJS-$(CONFIG_PIXBLOCKDSP) += pixblockdsp.o - OBJS-$(CONFIG_QPELDSP) += qpeldsp.o - OBJS-$(CONFIG_QSV) += qsv.o -@@ -139,6 +141,7 @@ OBJS-$(CONFIG_QSVDEC) += qsvdec.o +@@ -139,6 +140,7 @@ OBJS-$(CONFIG_QSVDEC) += qsvdec.o OBJS-$(CONFIG_QSVENC) += qsvenc.o OBJS-$(CONFIG_RANGECODER) += rangecoder.o OBJS-$(CONFIG_RDFT) += rdft.o @@ -912,7 +1271,7 @@ index b3d284d7d0..e93c842047 100644 OBJS-$(CONFIG_RV34DSP) += rv34dsp.o OBJS-$(CONFIG_SHARED) += log2_tab.o reverse.o OBJS-$(CONFIG_SINEWIN) += sinewin.o -@@ -153,7 +156,10 @@ OBJS-$(CONFIG_VIDEODSP) += videodsp.o +@@ -153,7 +155,10 @@ OBJS-$(CONFIG_VIDEODSP) += videodsp.o OBJS-$(CONFIG_VP3DSP) += vp3dsp.o OBJS-$(CONFIG_VP56DSP) += vp56dsp.o OBJS-$(CONFIG_VP8DSP) += vp8dsp.o @@ -924,28 +1283,7 @@ index b3d284d7d0..e93c842047 100644 OBJS-$(CONFIG_WMA_FREQS) += wma_freqs.o OBJS-$(CONFIG_WMV2DSP) += wmv2dsp.o -@@ -374,9 +380,9 @@ OBJS-$(CONFIG_H264_CUVID_DECODER) += cuviddec.o - OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o - OBJS-$(CONFIG_H264_MF_ENCODER) += mfenc.o mf_utils.o - OBJS-$(CONFIG_H264_MMAL_DECODER) += mmaldec.o --OBJS-$(CONFIG_H264_NVENC_ENCODER) += nvenc.o nvenc_h264.o --OBJS-$(CONFIG_NVENC_ENCODER) += nvenc.o nvenc_h264.o --OBJS-$(CONFIG_NVENC_H264_ENCODER) += nvenc.o 
nvenc_h264.o -+OBJS-$(CONFIG_H264_NVENC_ENCODER) += nvenc_h264.o -+OBJS-$(CONFIG_NVENC_ENCODER) += nvenc_h264.o -+OBJS-$(CONFIG_NVENC_H264_ENCODER) += nvenc_h264.o - OBJS-$(CONFIG_H264_OMX_ENCODER) += omx.o - OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec.o - OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o -@@ -396,12 +402,20 @@ OBJS-$(CONFIG_HEVC_AMF_ENCODER) += amfenc_hevc.o - OBJS-$(CONFIG_HEVC_CUVID_DECODER) += cuviddec.o - OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o - OBJS-$(CONFIG_HEVC_MF_ENCODER) += mfenc.o mf_utils.o --OBJS-$(CONFIG_HEVC_NVENC_ENCODER) += nvenc.o nvenc_hevc.o --OBJS-$(CONFIG_NVENC_HEVC_ENCODER) += nvenc.o nvenc_hevc.o -+OBJS-$(CONFIG_HEVC_NVENC_ENCODER) += nvenc_hevc.o -+OBJS-$(CONFIG_NVENC_HEVC_ENCODER) += nvenc_hevc.o - OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec.o +@@ -402,6 +407,14 @@ OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec.o OBJS-$(CONFIG_HEVC_QSV_ENCODER) += qsvenc_hevc.o hevc_ps_enc.o \ hevc_data.o OBJS-$(CONFIG_HEVC_RKMPP_DECODER) += rkmppdec.o @@ -960,14 +1298,6 @@ index b3d284d7d0..e93c842047 100644 OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o h265_profile_level.o OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER) += v4l2_m2m_dec.o OBJS-$(CONFIG_HEVC_V4L2M2M_ENCODER) += v4l2_m2m_enc.o -@@ -874,7 +888,6 @@ OBJS-$(CONFIG_ADPCM_G726_ENCODER) += g726.o - OBJS-$(CONFIG_ADPCM_G726LE_DECODER) += g726.o - OBJS-$(CONFIG_ADPCM_G726LE_ENCODER) += g726.o - OBJS-$(CONFIG_ADPCM_IMA_AMV_DECODER) += adpcm.o adpcm_data.o --OBJS-$(CONFIG_ADPCM_IMA_AMV_ENCODER) += adpcmenc.o adpcm_data.o - OBJS-$(CONFIG_ADPCM_IMA_ALP_DECODER) += adpcm.o adpcm_data.o - OBJS-$(CONFIG_ADPCM_IMA_ALP_ENCODER) += adpcmenc.o adpcm_data.o - OBJS-$(CONFIG_ADPCM_IMA_APC_DECODER) += adpcm.o adpcm_data.o @@ -941,6 +954,10 @@ OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o OBJS-$(CONFIG_HEVC_NVDEC_HWACCEL) += nvdec_hevc.o @@ -2883,54 +3213,6 @@ index 0000000000..9a96c2523c +91: sub w1, w1, #32 + b 90b +endfunc -diff --git a/libavcodec/aasc.c b/libavcodec/aasc.c -index 86cb9e85a1..26570f49e5 100644 ---- a/libavcodec/aasc.c -+++ b/libavcodec/aasc.c -@@ -104,26 +104,26 @@ static int aasc_decode_frame(AVCodecContext *avctx, - ff_msrle_decode(avctx, s->frame, 8, &s->gb); - break; - case MKTAG('A', 'A', 'S', 'C'): -- switch (compr) { -- case 0: -- stride = (avctx->width * psize + psize) & ~psize; -- if (buf_size < stride * avctx->height) -- return AVERROR_INVALIDDATA; -- for (i = avctx->height - 1; i >= 0; i--) { -- memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * psize); -- buf += stride; -- buf_size -= stride; -- } -- break; -- case 1: -- bytestream2_init(&s->gb, buf, buf_size); -- ff_msrle_decode(avctx, s->frame, 8, &s->gb); -- break; -- default: -- av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr); -+ switch (compr) { -+ case 0: -+ stride = (avctx->width * psize + psize) & ~psize; -+ if (buf_size < stride * avctx->height) - return AVERROR_INVALIDDATA; -+ for (i = avctx->height - 1; i >= 0; i--) { -+ memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * psize); -+ buf += stride; -+ buf_size -= stride; - } - break; -+ case 1: -+ bytestream2_init(&s->gb, buf, buf_size); -+ ff_msrle_decode(avctx, s->frame, 8, &s->gb); -+ break; -+ default: -+ av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr); -+ return AVERROR_INVALIDDATA; -+ } -+ break; - default: - av_log(avctx, AV_LOG_ERROR, "Unknown FourCC: %X\n", avctx->codec_tag); - return -1; diff --git a/libavcodec/ac3.h 
b/libavcodec/ac3.h index 1fb900ecb1..e358f8d9e3 100644 --- a/libavcodec/ac3.h @@ -2951,25 +3233,21 @@ index 1fb900ecb1..e358f8d9e3 100644 typedef float SHORTFLOAT; #endif /* USE_FIXED */ -diff --git a/libavcodec/alacdsp.c b/libavcodec/alacdsp.c -index b3c1c424f3..9996eb4319 100644 ---- a/libavcodec/alacdsp.c -+++ b/libavcodec/alacdsp.c -@@ -29,12 +29,12 @@ static void decorrelate_stereo(int32_t *buffer[2], int nb_samples, - int i; +diff --git a/libavcodec/ac3_parser.c b/libavcodec/ac3_parser.c +index 6cb6038833..ba171653ef 100644 +--- a/libavcodec/ac3_parser.c ++++ b/libavcodec/ac3_parser.c +@@ -179,9 +179,7 @@ int av_ac3_parse_header(const uint8_t *buf, size_t size, + AC3HeaderInfo hdr; + int err; - for (i = 0; i < nb_samples; i++) { -- uint32_t a, b; -+ int32_t a, b; - - a = buffer[0][i]; - b = buffer[1][i]; - -- a -= (int)(b * decorr_left_weight) >> decorr_shift; -+ a -= (b * decorr_left_weight) >> decorr_shift; - b += a; - - buffer[0][i] = b; +- err = init_get_bits8(&gb, buf, size); +- if (err < 0) +- return AVERROR_INVALIDDATA; ++ init_get_bits8(&gb, buf, size); + err = ff_ac3_parse_header(&gb, &hdr); + if (err < 0) + return AVERROR_INVALIDDATA; diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index 2e9a3581de..d9571b437f 100644 --- a/libavcodec/allcodecs.c @@ -3038,92 +3316,77 @@ index 917e7b6264..e736905a76 100644 // read block type flag and read the samples accordingly diff --git a/libavcodec/apedec.c b/libavcodec/apedec.c -index de5627ad02..4684e40a46 100644 +index ffdd6beaf4..de5627ad02 100644 --- a/libavcodec/apedec.c +++ b/libavcodec/apedec.c -@@ -102,7 +102,7 @@ typedef struct APEFilter { - int16_t *historybuffer; ///< filter memory - int16_t *delay; ///< filtered values - -- uint32_t avg; -+ int avg; - } APEFilter; - - typedef struct APERice { -@@ -930,7 +930,7 @@ static av_always_inline int filter_3800(APEPredictor *p, - p->coeffsB[filter][0] += (((d3 >> 29) & 4) - 2) * sign; - p->coeffsB[filter][1] -= (((d4 >> 30) & 2) - 1) * sign; - -- p->filterB[filter] = p->lastA[filter] + (unsigned)(predictionB >> shift); -+ p->filterB[filter] = p->lastA[filter] + (predictionB >> shift); - p->filterA[filter] = p->filterB[filter] + (unsigned)((int)(p->filterA[filter] * 31U) >> 5); - - return p->filterA[filter]; -@@ -955,7 +955,7 @@ static void long_filter_high_3800(int32_t *buffer, int order, int shift, int len - dotprod += delay[j] * (unsigned)coeffs[j]; - coeffs[j] += ((delay[j] >> 31) | 1) * sign; - } -- buffer[i] -= (unsigned)(dotprod >> shift); -+ buffer[i] -= dotprod >> shift; - for (j = 0; j < order - 1; j++) - delay[j] = delay[j + 1]; - delay[order - 1] = buffer[i]; -@@ -1088,13 +1088,13 @@ static av_always_inline int predictor_update_3930(APEPredictor *p, - const int delayA) +@@ -1166,8 +1166,7 @@ static void predictor_decode_mono_3930(APEContext *ctx, int count) + static av_always_inline int predictor_update_filter(APEPredictor64 *p, + const int decoded, const int filter, + const int delayA, const int delayB, +- const int adaptA, const int adaptB, +- int compression_level) ++ const int adaptA, const int adaptB) { - int32_t predictionA, sign; -- uint32_t d0, d1, d2, d3; -+ int32_t d0, d1, d2, d3; + int64_t predictionA, predictionB; + int32_t sign; +@@ -1195,13 +1194,7 @@ static av_always_inline int predictor_update_filter(APEPredictor64 *p, + p->buf[delayB - 3] * p->coeffsB[filter][3] + + p->buf[delayB - 4] * p->coeffsB[filter][4]; - p->buf[delayA] = p->lastA[filter]; - d0 = p->buf[delayA ]; -- d1 = p->buf[delayA ] - (unsigned)p->buf[delayA - 1]; -- d2 = 
p->buf[delayA - 1] - (unsigned)p->buf[delayA - 2]; -- d3 = p->buf[delayA - 2] - (unsigned)p->buf[delayA - 3]; -+ d1 = p->buf[delayA ] - p->buf[delayA - 1]; -+ d2 = p->buf[delayA - 1] - p->buf[delayA - 2]; -+ d3 = p->buf[delayA - 2] - p->buf[delayA - 3]; - - predictionA = d0 * p->coeffsA[filter][0] + - d1 * p->coeffsA[filter][1] + -@@ -1105,10 +1105,10 @@ static av_always_inline int predictor_update_3930(APEPredictor *p, - p->filterA[filter] = p->lastA[filter] + ((int)(p->filterA[filter] * 31U) >> 5); +- if (compression_level < COMPRESSION_LEVEL_INSANE) { +- predictionA = (int32_t)predictionA; +- predictionB = (int32_t)predictionB; +- p->lastA[filter] = (int32_t)(decoded + (unsigned)((int32_t)(predictionA + (predictionB >> 1)) >> 10)); +- } else { +- p->lastA[filter] = decoded + ((int64_t)((uint64_t)predictionA + (predictionB >> 1)) >> 10); +- } ++ p->lastA[filter] = decoded + ((int64_t)((uint64_t)predictionA + (predictionB >> 1)) >> 10); + p->filterA[filter] = p->lastA[filter] + ((int64_t)(p->filterA[filter] * 31ULL) >> 5); sign = APESIGN(decoded); -- p->coeffsA[filter][0] += (((int32_t)d0 < 0) * 2 - 1) * sign; -- p->coeffsA[filter][1] += (((int32_t)d1 < 0) * 2 - 1) * sign; -- p->coeffsA[filter][2] += (((int32_t)d2 < 0) * 2 - 1) * sign; -- p->coeffsA[filter][3] += (((int32_t)d3 < 0) * 2 - 1) * sign; -+ p->coeffsA[filter][0] += ((d0 < 0) * 2 - 1) * sign; -+ p->coeffsA[filter][1] += ((d1 < 0) * 2 - 1) * sign; -+ p->coeffsA[filter][2] += ((d2 < 0) * 2 - 1) * sign; -+ p->coeffsA[filter][3] += ((d3 < 0) * 2 - 1) * sign; +@@ -1229,12 +1222,10 @@ static void predictor_decode_stereo_3950(APEContext *ctx, int count) + while (count--) { + /* Predictor Y */ + *decoded0 = predictor_update_filter(p, *decoded0, 0, YDELAYA, YDELAYB, +- YADAPTCOEFFSA, YADAPTCOEFFSB, +- ctx->compression_level); ++ YADAPTCOEFFSA, YADAPTCOEFFSB); + decoded0++; + *decoded1 = predictor_update_filter(p, *decoded1, 1, XDELAYA, XDELAYB, +- XADAPTCOEFFSA, XADAPTCOEFFSB, +- ctx->compression_level); ++ XADAPTCOEFFSA, XADAPTCOEFFSB); + decoded1++; - return p->filterA[filter]; - } -@@ -1587,7 +1587,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, - for (ch = 0; ch < s->channels; ch++) { - sample8 = (uint8_t *)frame->data[ch]; - for (i = 0; i < blockstodecode; i++) -- *sample8++ = (s->decoded[ch][i] + 0x80U) & 0xff; -+ *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff; - } - break; - case 16: -diff --git a/libavcodec/argo.c b/libavcodec/argo.c -index 8f58e682f6..f633ec2691 100644 ---- a/libavcodec/argo.c -+++ b/libavcodec/argo.c -@@ -608,9 +608,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, - uint32_t chunk; - int ret; + /* Combined */ +@@ -1618,24 +1609,13 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, + s->samples -= blockstodecode; -- if (avpkt->size < 4) -- return AVERROR_INVALIDDATA; + if (avctx->err_recognition & AV_EF_CRCCHECK && +- s->fileversion >= 3900) { ++ s->fileversion >= 3900 && s->bps < 24) { + uint32_t crc = s->CRC_state; + const AVCRC *crc_tab = av_crc_get_table(AV_CRC_32_IEEE_LE); +- int stride = s->bps == 24 ? 
4 : (s->bps>>3); +- int offset = s->bps == 24; +- int bytes = s->bps >> 3; - - bytestream2_init(gb, avpkt->data, avpkt->size); + for (i = 0; i < blockstodecode; i++) { + for (ch = 0; ch < s->channels; ch++) { +-#if HAVE_BIGENDIAN +- uint8_t *smp_native = frame->data[ch] + i*stride; +- uint8_t smp[4]; +- for(int j = 0; jdata[ch] + i*stride; +-#endif +- crc = av_crc(crc_tab, crc, smp+offset, bytes); ++ uint8_t *smp = frame->data[ch] + (i*(s->bps >> 3)); ++ crc = av_crc(crc_tab, crc, smp, s->bps >> 3); + } + } - if ((ret = ff_reget_buffer(avctx, frame, 0)) < 0) diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile index c4ab93aeeb..cd926f7b33 100644 --- a/libavcodec/arm/Makefile @@ -17913,267 +18176,6 @@ index 0000000000..af8c4c03f0 + bx lr + +endfunc -diff --git a/libavcodec/arm/sbcdsp_neon.S b/libavcodec/arm/sbcdsp_neon.S -index 914abfb6cc..d83d21d202 100644 ---- a/libavcodec/arm/sbcdsp_neon.S -+++ b/libavcodec/arm/sbcdsp_neon.S -@@ -38,49 +38,49 @@ function ff_sbc_analyze_4_neon, export=1 - /* TODO: merge even and odd cases (or even merge all four calls to this - * function) in order to have only aligned reads from 'in' array - * and reduce number of load instructions */ -- vld1.16 {d16, d17}, [r0, :64]! -- vld1.16 {d20, d21}, [r2, :128]! -+ vld1.16 {d4, d5}, [r0, :64]! -+ vld1.16 {d8, d9}, [r2, :128]! - -- vmull.s16 q0, d16, d20 -- vld1.16 {d18, d19}, [r0, :64]! -- vmull.s16 q1, d17, d21 -- vld1.16 {d22, d23}, [r2, :128]! -+ vmull.s16 q0, d4, d8 -+ vld1.16 {d6, d7}, [r0, :64]! -+ vmull.s16 q1, d5, d9 -+ vld1.16 {d10, d11}, [r2, :128]! - -- vmlal.s16 q0, d18, d22 -- vld1.16 {d16, d17}, [r0, :64]! -- vmlal.s16 q1, d19, d23 -- vld1.16 {d20, d21}, [r2, :128]! -+ vmlal.s16 q0, d6, d10 -+ vld1.16 {d4, d5}, [r0, :64]! -+ vmlal.s16 q1, d7, d11 -+ vld1.16 {d8, d9}, [r2, :128]! - -- vmlal.s16 q0, d16, d20 -- vld1.16 {d18, d19}, [r0, :64]! -- vmlal.s16 q1, d17, d21 -- vld1.16 {d22, d23}, [r2, :128]! -+ vmlal.s16 q0, d4, d8 -+ vld1.16 {d6, d7}, [r0, :64]! -+ vmlal.s16 q1, d5, d9 -+ vld1.16 {d10, d11}, [r2, :128]! - -- vmlal.s16 q0, d18, d22 -- vld1.16 {d16, d17}, [r0, :64]! -- vmlal.s16 q1, d19, d23 -- vld1.16 {d20, d21}, [r2, :128]! -+ vmlal.s16 q0, d6, d10 -+ vld1.16 {d4, d5}, [r0, :64]! -+ vmlal.s16 q1, d7, d11 -+ vld1.16 {d8, d9}, [r2, :128]! - -- vmlal.s16 q0, d16, d20 -- vmlal.s16 q1, d17, d21 -+ vmlal.s16 q0, d4, d8 -+ vmlal.s16 q1, d5, d9 - - vpadd.s32 d0, d0, d1 - vpadd.s32 d1, d2, d3 - - vrshrn.s32 d0, q0, SBC_PROTO_FIXED_SCALE - -- vld1.16 {d16, d17, d18, d19}, [r2, :128]! -+ vld1.16 {d2, d3, d4, d5}, [r2, :128]! - - vdup.i32 d1, d0[1] /* TODO: can be eliminated */ - vdup.i32 d0, d0[0] /* TODO: can be eliminated */ - -- vmull.s16 q10, d16, d0 -- vmull.s16 q11, d17, d0 -- vmlal.s16 q10, d18, d1 -- vmlal.s16 q11, d19, d1 -+ vmull.s16 q3, d2, d0 -+ vmull.s16 q4, d3, d0 -+ vmlal.s16 q3, d4, d1 -+ vmlal.s16 q4, d5, d1 - -- vpadd.s32 d0, d20, d21 /* TODO: can be eliminated */ -- vpadd.s32 d1, d22, d23 /* TODO: can be eliminated */ -+ vpadd.s32 d0, d6, d7 /* TODO: can be eliminated */ -+ vpadd.s32 d1, d8, d9 /* TODO: can be eliminated */ - - vst1.32 {d0, d1}, [r1, :128] - -@@ -91,57 +91,57 @@ function ff_sbc_analyze_8_neon, export=1 - /* TODO: merge even and odd cases (or even merge all four calls to this - * function) in order to have only aligned reads from 'in' array - * and reduce number of load instructions */ -- vld1.16 {d16, d17}, [r0, :64]! -- vld1.16 {d20, d21}, [r2, :128]! -- -- vmull.s16 q12, d16, d20 -- vld1.16 {d18, d19}, [r0, :64]! 
-- vmull.s16 q13, d17, d21 -- vld1.16 {d22, d23}, [r2, :128]! -- vmull.s16 q14, d18, d22 -- vld1.16 {d16, d17}, [r0, :64]! -- vmull.s16 q15, d19, d23 -- vld1.16 {d20, d21}, [r2, :128]! -- -- vmlal.s16 q12, d16, d20 -- vld1.16 {d18, d19}, [r0, :64]! -- vmlal.s16 q13, d17, d21 -- vld1.16 {d22, d23}, [r2, :128]! -- vmlal.s16 q14, d18, d22 -- vld1.16 {d16, d17}, [r0, :64]! -- vmlal.s16 q15, d19, d23 -- vld1.16 {d20, d21}, [r2, :128]! -- -- vmlal.s16 q12, d16, d20 -- vld1.16 {d18, d19}, [r0, :64]! -- vmlal.s16 q13, d17, d21 -- vld1.16 {d22, d23}, [r2, :128]! -- vmlal.s16 q14, d18, d22 -- vld1.16 {d16, d17}, [r0, :64]! -- vmlal.s16 q15, d19, d23 -- vld1.16 {d20, d21}, [r2, :128]! -- -- vmlal.s16 q12, d16, d20 -- vld1.16 {d18, d19}, [r0, :64]! -- vmlal.s16 q13, d17, d21 -- vld1.16 {d22, d23}, [r2, :128]! -- vmlal.s16 q14, d18, d22 -- vld1.16 {d16, d17}, [r0, :64]! -- vmlal.s16 q15, d19, d23 -- vld1.16 {d20, d21}, [r2, :128]! -- -- vmlal.s16 q12, d16, d20 -- vld1.16 {d18, d19}, [r0, :64]! -- vmlal.s16 q13, d17, d21 -- vld1.16 {d22, d23}, [r2, :128]! -- -- vmlal.s16 q14, d18, d22 -- vmlal.s16 q15, d19, d23 -- -- vpadd.s32 d0, d24, d25 -- vpadd.s32 d1, d26, d27 -- vpadd.s32 d2, d28, d29 -- vpadd.s32 d3, d30, d31 -+ vld1.16 {d4, d5}, [r0, :64]! -+ vld1.16 {d8, d9}, [r2, :128]! -+ -+ vmull.s16 q6, d4, d8 -+ vld1.16 {d6, d7}, [r0, :64]! -+ vmull.s16 q7, d5, d9 -+ vld1.16 {d10, d11}, [r2, :128]! -+ vmull.s16 q8, d6, d10 -+ vld1.16 {d4, d5}, [r0, :64]! -+ vmull.s16 q9, d7, d11 -+ vld1.16 {d8, d9}, [r2, :128]! -+ -+ vmlal.s16 q6, d4, d8 -+ vld1.16 {d6, d7}, [r0, :64]! -+ vmlal.s16 q7, d5, d9 -+ vld1.16 {d10, d11}, [r2, :128]! -+ vmlal.s16 q8, d6, d10 -+ vld1.16 {d4, d5}, [r0, :64]! -+ vmlal.s16 q9, d7, d11 -+ vld1.16 {d8, d9}, [r2, :128]! -+ -+ vmlal.s16 q6, d4, d8 -+ vld1.16 {d6, d7}, [r0, :64]! -+ vmlal.s16 q7, d5, d9 -+ vld1.16 {d10, d11}, [r2, :128]! -+ vmlal.s16 q8, d6, d10 -+ vld1.16 {d4, d5}, [r0, :64]! -+ vmlal.s16 q9, d7, d11 -+ vld1.16 {d8, d9}, [r2, :128]! -+ -+ vmlal.s16 q6, d4, d8 -+ vld1.16 {d6, d7}, [r0, :64]! -+ vmlal.s16 q7, d5, d9 -+ vld1.16 {d10, d11}, [r2, :128]! -+ vmlal.s16 q8, d6, d10 -+ vld1.16 {d4, d5}, [r0, :64]! -+ vmlal.s16 q9, d7, d11 -+ vld1.16 {d8, d9}, [r2, :128]! -+ -+ vmlal.s16 q6, d4, d8 -+ vld1.16 {d6, d7}, [r0, :64]! -+ vmlal.s16 q7, d5, d9 -+ vld1.16 {d10, d11}, [r2, :128]! -+ -+ vmlal.s16 q8, d6, d10 -+ vmlal.s16 q9, d7, d11 -+ -+ vpadd.s32 d0, d12, d13 -+ vpadd.s32 d1, d14, d15 -+ vpadd.s32 d2, d16, d17 -+ vpadd.s32 d3, d18, d19 - - vrshr.s32 q0, q0, SBC_PROTO_FIXED_SCALE - vrshr.s32 q1, q1, SBC_PROTO_FIXED_SCALE -@@ -153,38 +153,38 @@ function ff_sbc_analyze_8_neon, export=1 - vdup.i32 d1, d0[1] /* TODO: can be eliminated */ - vdup.i32 d0, d0[0] /* TODO: can be eliminated */ - -- vld1.16 {d16, d17}, [r2, :128]! -- vmull.s16 q12, d16, d0 -- vld1.16 {d18, d19}, [r2, :128]! -- vmull.s16 q13, d17, d0 -- vmull.s16 q14, d18, d0 -- vmull.s16 q15, d19, d0 -- -- vld1.16 {d16, d17}, [r2, :128]! -- vmlal.s16 q12, d16, d1 -- vld1.16 {d18, d19}, [r2, :128]! -- vmlal.s16 q13, d17, d1 -- vmlal.s16 q14, d18, d1 -- vmlal.s16 q15, d19, d1 -- -- vld1.16 {d16, d17}, [r2, :128]! -- vmlal.s16 q12, d16, d2 -- vld1.16 {d18, d19}, [r2, :128]! -- vmlal.s16 q13, d17, d2 -- vmlal.s16 q14, d18, d2 -- vmlal.s16 q15, d19, d2 -- -- vld1.16 {d16, d17}, [r2, :128]! -- vmlal.s16 q12, d16, d3 -- vld1.16 {d18, d19}, [r2, :128]! 
-- vmlal.s16 q13, d17, d3 -- vmlal.s16 q14, d18, d3 -- vmlal.s16 q15, d19, d3 -- -- vpadd.s32 d0, d24, d25 /* TODO: can be eliminated */ -- vpadd.s32 d1, d26, d27 /* TODO: can be eliminated */ -- vpadd.s32 d2, d28, d29 /* TODO: can be eliminated */ -- vpadd.s32 d3, d30, d31 /* TODO: can be eliminated */ -+ vld1.16 {d4, d5}, [r2, :128]! -+ vmull.s16 q6, d4, d0 -+ vld1.16 {d6, d7}, [r2, :128]! -+ vmull.s16 q7, d5, d0 -+ vmull.s16 q8, d6, d0 -+ vmull.s16 q9, d7, d0 -+ -+ vld1.16 {d4, d5}, [r2, :128]! -+ vmlal.s16 q6, d4, d1 -+ vld1.16 {d6, d7}, [r2, :128]! -+ vmlal.s16 q7, d5, d1 -+ vmlal.s16 q8, d6, d1 -+ vmlal.s16 q9, d7, d1 -+ -+ vld1.16 {d4, d5}, [r2, :128]! -+ vmlal.s16 q6, d4, d2 -+ vld1.16 {d6, d7}, [r2, :128]! -+ vmlal.s16 q7, d5, d2 -+ vmlal.s16 q8, d6, d2 -+ vmlal.s16 q9, d7, d2 -+ -+ vld1.16 {d4, d5}, [r2, :128]! -+ vmlal.s16 q6, d4, d3 -+ vld1.16 {d6, d7}, [r2, :128]! -+ vmlal.s16 q7, d5, d3 -+ vmlal.s16 q8, d6, d3 -+ vmlal.s16 q9, d7, d3 -+ -+ vpadd.s32 d0, d12, d13 /* TODO: can be eliminated */ -+ vpadd.s32 d1, d14, d15 /* TODO: can be eliminated */ -+ vpadd.s32 d2, d16, d17 /* TODO: can be eliminated */ -+ vpadd.s32 d3, d18, d19 /* TODO: can be eliminated */ - - vst1.32 {d0, d1, d2, d3}, [r1, :128] - diff --git a/libavcodec/arm/vc1dsp_init_neon.c b/libavcodec/arm/vc1dsp_init_neon.c index 2cca784f5a..48cb816b70 100644 --- a/libavcodec/arm/vc1dsp_init_neon.c @@ -19055,166 +19057,28 @@ index 93f043bf08..96014fbebc 100644 +91: sub r1, r1, #16 + b 90b +endfunc -diff --git a/libavcodec/av1.h b/libavcodec/av1.h -index 951a18ecb2..0f99ae4829 100644 ---- a/libavcodec/av1.h -+++ b/libavcodec/av1.h -@@ -114,13 +114,6 @@ enum { - AV1_WARP_MODEL_TRANSLATION = 1, - AV1_WARP_MODEL_ROTZOOM = 2, - AV1_WARP_MODEL_AFFINE = 3, -- AV1_WARP_PARAM_REDUCE_BITS = 6, -- -- AV1_DIV_LUT_BITS = 8, -- AV1_DIV_LUT_PREC_BITS = 14, -- AV1_DIV_LUT_NUM = 257, -- -- AV1_MAX_LOOP_FILTER = 63, - }; - - diff --git a/libavcodec/av1dec.c b/libavcodec/av1dec.c -index a3301f454f..a75d6744d3 100644 +index d04bc55883..a3301f454f 100644 --- a/libavcodec/av1dec.c +++ b/libavcodec/av1dec.c -@@ -28,34 +28,6 @@ - #include "internal.h" - #include "profiles.h" +@@ -166,7 +166,7 @@ static uint8_t get_shear_params_valid(AV1DecContext *s, int idx) + int16_t alpha, beta, gamma, delta, divf, divs; + int64_t v, w; + int32_t *param = &s->cur_frame.gm_params[idx][0]; +- if (param[2] <= 0) ++ if (param[2] < 0) + return 0; --/**< same with Div_Lut defined in spec 7.11.3.7 */ --static const uint16_t div_lut[AV1_DIV_LUT_NUM] = { -- 16384, 16320, 16257, 16194, 16132, 16070, 16009, 15948, 15888, 15828, 15768, -- 15709, 15650, 15592, 15534, 15477, 15420, 15364, 15308, 15252, 15197, 15142, -- 15087, 15033, 14980, 14926, 14873, 14821, 14769, 14717, 14665, 14614, 14564, -- 14513, 14463, 14413, 14364, 14315, 14266, 14218, 14170, 14122, 14075, 14028, -- 13981, 13935, 13888, 13843, 13797, 13752, 13707, 13662, 13618, 13574, 13530, -- 13487, 13443, 13400, 13358, 13315, 13273, 13231, 13190, 13148, 13107, 13066, -- 13026, 12985, 12945, 12906, 12866, 12827, 12788, 12749, 12710, 12672, 12633, -- 12596, 12558, 12520, 12483, 12446, 12409, 12373, 12336, 12300, 12264, 12228, -- 12193, 12157, 12122, 12087, 12053, 12018, 11984, 11950, 11916, 11882, 11848, -- 11815, 11782, 11749, 11716, 11683, 11651, 11619, 11586, 11555, 11523, 11491, -- 11460, 11429, 11398, 11367, 11336, 11305, 11275, 11245, 11215, 11185, 11155, -- 11125, 11096, 11067, 11038, 11009, 10980, 10951, 10923, 10894, 10866, 10838, -- 10810, 10782, 10755, 10727, 10700, 10673, 10645, 10618, 10592, 
10565, 10538, -- 10512, 10486, 10460, 10434, 10408, 10382, 10356, 10331, 10305, 10280, 10255, -- 10230, 10205, 10180, 10156, 10131, 10107, 10082, 10058, 10034, 10010, 9986, -- 9963, 9939, 9916, 9892, 9869, 9846, 9823, 9800, 9777, 9754, 9732, -- 9709, 9687, 9664, 9642, 9620, 9598, 9576, 9554, 9533, 9511, 9489, -- 9468, 9447, 9425, 9404, 9383, 9362, 9341, 9321, 9300, 9279, 9259, -- 9239, 9218, 9198, 9178, 9158, 9138, 9118, 9098, 9079, 9059, 9039, -- 9020, 9001, 8981, 8962, 8943, 8924, 8905, 8886, 8867, 8849, 8830, -- 8812, 8793, 8775, 8756, 8738, 8720, 8702, 8684, 8666, 8648, 8630, -- 8613, 8595, 8577, 8560, 8542, 8525, 8508, 8490, 8473, 8456, 8439, -- 8422, 8405, 8389, 8372, 8355, 8339, 8322, 8306, 8289, 8273, 8257, -- 8240, 8224, 8208, 8192 --}; -- - static uint32_t inverse_recenter(int r, uint32_t v) - { - if (v > 2 * r) -@@ -125,70 +97,6 @@ static void read_global_param(AV1DecContext *s, int type, int ref, int idx) - -mx, mx + 1, r) << prec_diff) + round; - } - --static uint64_t round_two(uint64_t x, uint16_t n) --{ -- if (n == 0) -- return x; -- return ((x + ((uint64_t)1 << (n - 1))) >> n); --} -- --static int64_t round_two_signed(int64_t x, uint16_t n) --{ -- return ((x<0) ? -((int64_t)round_two(-x, n)) : (int64_t)round_two(x, n)); --} -- --/** -- * Resolve divisor process. -- * see spec 7.11.3.7 -- */ --static int16_t resolve_divisor(uint32_t d, uint16_t *shift) --{ -- int32_t e, f; -- -- *shift = av_log2(d); -- e = d - (1 << (*shift)); -- if (*shift > AV1_DIV_LUT_BITS) -- f = round_two(e, *shift - AV1_DIV_LUT_BITS); -- else -- f = e << (AV1_DIV_LUT_BITS - (*shift)); -- -- *shift += AV1_DIV_LUT_PREC_BITS; -- -- return div_lut[f]; --} -- --/** -- * check if global motion params is valid. -- * see spec 7.11.3.6 -- */ --static uint8_t get_shear_params_valid(AV1DecContext *s, int idx) --{ -- int16_t alpha, beta, gamma, delta, divf, divs; -- int64_t v, w; -- int32_t *param = &s->cur_frame.gm_params[idx][0]; -- if (param[2] < 0) -- return 0; -- -- alpha = av_clip_int16(param[2] - (1 << AV1_WARPEDMODEL_PREC_BITS)); -- beta = av_clip_int16(param[3]); -- divf = resolve_divisor(abs(param[2]), &divs); -- v = (int64_t)param[4] * (1 << AV1_WARPEDMODEL_PREC_BITS); -- w = (int64_t)param[3] * param[4]; -- gamma = av_clip_int16((int)round_two_signed((v * divf), divs)); -- delta = av_clip_int16(param[5] - (int)round_two_signed((w * divf), divs) - (1 << AV1_WARPEDMODEL_PREC_BITS)); -- -- alpha = round_two_signed(alpha, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS; -- beta = round_two_signed(beta, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS; -- gamma = round_two_signed(gamma, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS; -- delta = round_two_signed(delta, AV1_WARP_PARAM_REDUCE_BITS) << AV1_WARP_PARAM_REDUCE_BITS; -- -- if ((4 * abs(alpha) + 7 * abs(beta)) >= (1 << AV1_WARPEDMODEL_PREC_BITS) || -- (4 * abs(gamma) + 4 * abs(delta)) >= (1 << AV1_WARPEDMODEL_PREC_BITS)) -- return 0; -- -- return 1; --} -- - /** - * update gm type/params, since cbs already implemented part of this funcation, - * so we don't need to full implement spec. 
-@@ -236,9 +144,6 @@ static void global_motion_params(AV1DecContext *s) - read_global_param(s, type, ref, 0); - read_global_param(s, type, ref, 1); - } -- if (type <= AV1_WARP_MODEL_AFFINE) { -- s->cur_frame.gm_invalid[ref] = !get_shear_params_valid(s, ref); -- } - } - } - -@@ -604,9 +509,6 @@ static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *s - - dst->spatial_id = src->spatial_id; - dst->temporal_id = src->temporal_id; -- memcpy(dst->gm_invalid, -- src->gm_invalid, -- AV1_NUM_REF_FRAMES * sizeof(uint8_t)); - memcpy(dst->gm_type, - src->gm_type, - AV1_NUM_REF_FRAMES * sizeof(uint8_t)); -diff --git a/libavcodec/av1dec.h b/libavcodec/av1dec.h -index 4e140588b9..248a68750f 100644 ---- a/libavcodec/av1dec.h -+++ b/libavcodec/av1dec.h -@@ -42,7 +42,6 @@ typedef struct AV1Frame { - int temporal_id; - int spatial_id; - -- uint8_t gm_invalid[AV1_NUM_REF_FRAMES]; - uint8_t gm_type[AV1_NUM_REF_FRAMES]; - int32_t gm_params[AV1_NUM_REF_FRAMES][6]; + alpha = av_clip_int16(param[2] - (1 << AV1_WARPEDMODEL_PREC_BITS)); +@@ -661,7 +661,7 @@ static int set_context_with_sequence(AVCodecContext *avctx, + avctx->color_range = + seq->color_config.color_range ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG; + avctx->color_primaries = seq->color_config.color_primaries; +- avctx->colorspace = seq->color_config.matrix_coefficients; ++ avctx->colorspace = seq->color_config.color_primaries; + avctx->color_trc = seq->color_config.transfer_characteristics; + switch (seq->color_config.chroma_sample_position) { diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h index 8a71c04230..53644506e5 100644 --- a/libavcodec/avcodec.h @@ -19238,7 +19102,7 @@ index 8a71c04230..53644506e5 100644 /** diff --git a/libavcodec/bink.c b/libavcodec/bink.c -index f04017d4b4..5efd24e9c3 100644 +index f04017d4b4..c7d76d1d14 100644 --- a/libavcodec/bink.c +++ b/libavcodec/bink.c @@ -869,7 +869,7 @@ static int binkb_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb, @@ -19277,15 +19141,6 @@ index f04017d4b4..5efd24e9c3 100644 av_log(c->avctx, AV_LOG_WARNING, "Reference block is out of bounds\n"); } else if (ref + 8*stride < dst || ref >= dst + 8*stride) { c->put_pixels_tab(dst, ref, stride, 8); -@@ -1086,7 +1086,7 @@ static int bink_decode_plane(BinkContext *c, AVFrame *frame, GetBitContext *gb, - for (bx = 0; bx < bw; bx++, dst += 8, prev += 8) { - blk = get_value(c, BINK_SRC_BLOCK_TYPES); - // 16x16 block type on odd line means part of the already decoded block, so skip it -- if (((by & 1) || (bx & 1)) && blk == SCALED_BLOCK) { -+ if ((by & 1) && blk == SCALED_BLOCK) { - bx++; - dst += 8; - prev += 8; diff --git a/libavcodec/cabac.h b/libavcodec/cabac.h index 38d06b2842..bbf5d70560 100644 --- a/libavcodec/cabac.h @@ -19301,54 +19156,106 @@ index 38d06b2842..bbf5d70560 100644 }CABACContext; int ff_init_cabac_decoder(CABACContext *c, const uint8_t *buf, int buf_size); -diff --git a/libavcodec/cbs_av1_syntax_template.c b/libavcodec/cbs_av1_syntax_template.c -index d98d3d42de..6fe6e9a4f3 100644 ---- a/libavcodec/cbs_av1_syntax_template.c -+++ b/libavcodec/cbs_av1_syntax_template.c -@@ -355,7 +355,7 @@ static int FUNC(set_frame_refs)(CodedBitstreamContext *ctx, RWContext *rw, - AV1_REF_FRAME_ALTREF2, AV1_REF_FRAME_ALTREF - }; - int8_t ref_frame_idx[AV1_REFS_PER_FRAME], used_frame[AV1_NUM_REF_FRAMES]; -- int16_t shifted_order_hints[AV1_NUM_REF_FRAMES]; -+ int8_t shifted_order_hints[AV1_NUM_REF_FRAMES]; - int cur_frame_hint, latest_order_hint, earliest_order_hint, ref; - int i, j; +diff --git 
a/libavcodec/cbs_av1.c b/libavcodec/cbs_av1.c +index 615132f961..302e1f38f5 100644 +--- a/libavcodec/cbs_av1.c ++++ b/libavcodec/cbs_av1.c +@@ -37,7 +37,7 @@ static int cbs_av1_read_uvlc(CodedBitstreamContext *ctx, GetBitContext *gbc, + position = get_bits_count(gbc); -diff --git a/libavcodec/cdgraphics.c b/libavcodec/cdgraphics.c -index b452baa7d8..263459d0f2 100644 ---- a/libavcodec/cdgraphics.c -+++ b/libavcodec/cdgraphics.c -@@ -239,7 +239,7 @@ static void cdg_scroll(CDGraphicsContext *cc, uint8_t *data, - for (y = FFMAX(0, vinc); y < FFMIN(CDG_FULL_HEIGHT + vinc, CDG_FULL_HEIGHT); y++) - memcpy(out + FFMAX(0, hinc) + stride * y, - in + FFMAX(0, hinc) - hinc + (y - vinc) * stride, -- FFABS(stride) - FFABS(hinc)); -+ FFMIN(stride + hinc, stride)); + zeroes = 0; +- while (zeroes < 32) { ++ while (1) { + if (get_bits_left(gbc) < 1) { + av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid uvlc code at " + "%s: bitstream ended.\n", name); +@@ -50,18 +50,7 @@ static int cbs_av1_read_uvlc(CodedBitstreamContext *ctx, GetBitContext *gbc, + } - if (vinc > 0) - cdg_fill_wrapper(0, 0, out, -diff --git a/libavcodec/cfhd.c b/libavcodec/cfhd.c -index b61d1e7222..6f13207cc1 100644 ---- a/libavcodec/cfhd.c -+++ b/libavcodec/cfhd.c -@@ -838,7 +838,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, - const uint16_t q = s->quantisation; + if (zeroes >= 32) { +- // The spec allows at least thirty-two zero bits followed by a +- // one to mean 2^32-1, with no constraint on the number of +- // zeroes. The libaom reference decoder does not match this, +- // instead reading thirty-two zeroes but not the following one +- // to mean 2^32-1. These two interpretations are incompatible +- // and other implementations may follow one or the other. +- // Therefore we reject thirty-two zeroes because the intended +- // behaviour is not clear. 
+- av_log(ctx->log_ctx, AV_LOG_ERROR, "Thirty-two zero bits in " +- "%s uvlc code: considered invalid due to conflicting " +- "standard and reference decoder behaviour.\n", name); +- return AVERROR_INVALIDDATA; ++ value = MAX_UINT_BITS(32); + } else { + if (get_bits_left(gbc) < zeroes) { + av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid uvlc code at " +@@ -390,7 +379,7 @@ static int cbs_av1_write_increment(CodedBitstreamContext *ctx, PutBitContext *pb + } - for (i = 0; i < run; i++) { -- *coeff_data |= coeff * 256U; -+ *coeff_data |= coeff * 256; - *coeff_data++ *= q; - } - } else { -@@ -869,7 +869,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame, - const uint16_t q = s->quantisation; + if (len > 0) +- put_bits(pbc, len, (1U << len) - 1 - (value != range_max)); ++ put_bits(pbc, len, (1 << len) - 1 - (value != range_max)); + + return 0; + } +diff --git a/libavcodec/cbs_jpeg.c b/libavcodec/cbs_jpeg.c +index dc5a1cc4d5..7d3e10fcc8 100644 +--- a/libavcodec/cbs_jpeg.c ++++ b/libavcodec/cbs_jpeg.c +@@ -166,13 +166,13 @@ static int cbs_jpeg_split_fragment(CodedBitstreamContext *ctx, + } + } else { + i = start; +- if (i > frag->data_size - 2) { ++ if (i + 2 > frag->data_size) { + av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: " + "truncated at %02x marker.\n", marker); + return AVERROR_INVALIDDATA; + } + length = AV_RB16(frag->data + i); +- if (length > frag->data_size - i) { ++ if (i + length > frag->data_size) { + av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: " + "truncated at %02x marker segment.\n", marker); + return AVERROR_INVALIDDATA; +diff --git a/libavcodec/celp_math.h b/libavcodec/celp_math.h +index 99a0470719..18888a425d 100644 +--- a/libavcodec/celp_math.h ++++ b/libavcodec/celp_math.h +@@ -78,7 +78,7 @@ int64_t ff_dot_product(const int16_t *a, const int16_t *b, int length); + * + * @return value << offset, if offset>=0; value >> -offset - otherwise + */ +-static inline unsigned bidir_sal(unsigned value, int offset) ++static inline int bidir_sal(int value, int offset) + { + if(offset < 0) return value >> -offset; + else return value << offset; +diff --git a/libavcodec/cfhdenc.c b/libavcodec/cfhdenc.c +index f73bb7154f..42bbf99c96 100644 +--- a/libavcodec/cfhdenc.c ++++ b/libavcodec/cfhdenc.c +@@ -258,11 +258,6 @@ static av_cold int cfhd_encode_init(AVCodecContext *avctx) + if (ret < 0) + return ret; + +- if (avctx->height < 32) { +- av_log(avctx, AV_LOG_ERROR, "Height must be >= 32.\n"); +- return AVERROR_INVALIDDATA; +- } +- + if (avctx->width & 15) { + av_log(avctx, AV_LOG_ERROR, "Width must be multiple of 16.\n"); + return AVERROR_INVALIDDATA; +@@ -552,7 +547,7 @@ static int cfhd_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + width, height * 2); + } + +- ret = ff_alloc_packet2(avctx, pkt, 256LL + s->planes * (4LL * avctx->width * (avctx->height + 15) + 2048LL), 0); ++ ret = ff_alloc_packet2(avctx, pkt, 64LL + s->planes * (2LL * avctx->width * avctx->height + 1000LL), 0); + if (ret < 0) + return ret; - for (i = 0; i < run; i++) { -- *coeff_data |= coeff * 256U; -+ *coeff_data |= coeff * 256; - *coeff_data++ *= q; - } - } else { diff --git a/libavcodec/codec.h b/libavcodec/codec.h index 50a22f6e3c..5acf572ef4 100644 --- a/libavcodec/codec.h @@ -19371,90 +19278,70 @@ index 50a22f6e3c..5acf572ef4 100644 /** * Find a registered decoder with the specified name. 
* -diff --git a/libavcodec/diracdec.c b/libavcodec/diracdec.c -index cf7fc2c56c..b9999cde01 100644 ---- a/libavcodec/diracdec.c -+++ b/libavcodec/diracdec.c -@@ -1432,8 +1432,8 @@ static void global_mv(DiracContext *s, DiracBlock *block, int x, int y, int ref) - int *c = s->globalmc[ref].perspective; +diff --git a/libavcodec/cri.c b/libavcodec/cri.c +index d2d80b6f1c..0558d0c8dd 100644 +--- a/libavcodec/cri.c ++++ b/libavcodec/cri.c +@@ -236,14 +236,10 @@ static int cri_decode_frame(AVCodecContext *avctx, void *data, + s->data_size = length; + goto skip; + case 105: +- if (length <= 0) +- return AVERROR_INVALIDDATA; + hflip = bytestream2_get_byte(gb) != 0; + length--; + goto skip; + case 106: +- if (length <= 0) +- return AVERROR_INVALIDDATA; + vflip = bytestream2_get_byte(gb) != 0; + length--; + goto skip; +diff --git a/libavcodec/cscd.c b/libavcodec/cscd.c +index 9230f9edf3..f5c93e9912 100644 +--- a/libavcodec/cscd.c ++++ b/libavcodec/cscd.c +@@ -71,9 +71,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, + int buf_size = avpkt->size; + CamStudioContext *c = avctx->priv_data; + int ret; +- int bpp = avctx->bits_per_coded_sample / 8; +- int bugdelta = FFALIGN(avctx->width * bpp, 4) * avctx->height +- - (avctx->width & ~3) * bpp * avctx->height; - int64_t m = (1<u.mv[ref][0] = (mx + (1<<(ez+ep))) >> (ez+ep); - block->u.mv[ref][1] = (my + (1<<(ez+ep))) >> (ez+ep); -diff --git a/libavcodec/dnxhdenc.c b/libavcodec/dnxhdenc.c -index 31ae147433..2461c51727 100644 ---- a/libavcodec/dnxhdenc.c -+++ b/libavcodec/dnxhdenc.c -@@ -1353,7 +1353,7 @@ static av_cold int dnxhd_encode_end(AVCodecContext *avctx) - av_freep(&ctx->qmatrix_c16); - av_freep(&ctx->qmatrix_l16); - -- if (ctx->thread[1]) { -+ if (avctx->active_thread_type == FF_THREAD_SLICE) { - for (i = 1; i < avctx->thread_count; i++) - av_freep(&ctx->thread[i]); - } -diff --git a/libavcodec/dstdec.c b/libavcodec/dstdec.c -index 819a037c69..84d19b91aa 100644 ---- a/libavcodec/dstdec.c -+++ b/libavcodec/dstdec.c -@@ -215,7 +215,7 @@ static uint8_t prob_dst_x_bit(int c) - return (ff_reverse[c & 127] >> 1) + 1; - } - --static int build_filter(int16_t table[DST_MAX_ELEMENTS][16][256], const Table *fsets) -+static void build_filter(int16_t table[DST_MAX_ELEMENTS][16][256], const Table *fsets) - { - int i, j, k, l; - -@@ -226,17 +226,14 @@ static int build_filter(int16_t table[DST_MAX_ELEMENTS][16][256], const Table *f - int total = av_clip(length - j * 8, 0, 8); - - for (k = 0; k < 256; k++) { -- int64_t v = 0; -+ int v = 0; - - for (l = 0; l < total; l++) - v += (((k >> l) & 1) * 2 - 1) * fsets->coeff[i][j * 8 + l]; -- if ((int16_t)v != v) -- return AVERROR_INVALIDDATA; - table[i][j][k] = v; - } + if (buf_size < 2) { + av_log(avctx, AV_LOG_ERROR, "coded frame too small\n"); +@@ -87,7 +84,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, + switch ((buf[0] >> 1) & 7) { + case 0: { // lzo compression + int outlen = c->decomp_size, inlen = buf_size - 2; +- if (av_lzo1x_decode(c->decomp_buf, &outlen, &buf[2], &inlen) || (outlen && outlen != bugdelta)) { ++ if (av_lzo1x_decode(c->decomp_buf, &outlen, &buf[2], &inlen) || outlen) { + av_log(avctx, AV_LOG_ERROR, "error during lzo decompression\n"); + return AVERROR_INVALIDDATA; } - } -- return 0; - } +@@ -96,7 +93,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, + case 1: { // zlib compression + #if CONFIG_ZLIB + unsigned long dlen = c->decomp_size; +- if (uncompress(c->decomp_buf, &dlen, &buf[2], buf_size - 2) != Z_OK 
|| (dlen != c->decomp_size && dlen != c->decomp_size - bugdelta)) { ++ if (uncompress(c->decomp_buf, &dlen, &buf[2], buf_size - 2) != Z_OK || dlen != c->decomp_size) { + av_log(avctx, AV_LOG_ERROR, "error during zlib decompression\n"); + return AVERROR_INVALIDDATA; + } +diff --git a/libavcodec/dxv.c b/libavcodec/dxv.c +index 69263cd30d..71d85208d8 100644 +--- a/libavcodec/dxv.c ++++ b/libavcodec/dxv.c +@@ -440,7 +440,7 @@ static int get_opcodes(GetByteContext *gb, uint32_t *table, uint8_t *dst, int op - static int decode_frame(AVCodecContext *avctx, void *data, -@@ -332,9 +329,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, + size_in_bits = bytestream2_get_le32(gb); + endoffset = ((size_in_bits + 7) >> 3) - 4; +- if ((int)endoffset <= 0 || bytestream2_get_bytes_left(gb) < endoffset) ++ if (endoffset <= 0 || bytestream2_get_bytes_left(gb) < endoffset) return AVERROR_INVALIDDATA; - ac_init(ac, gb); -- ret = build_filter(s->filter, &s->fsets); -- if (ret < 0) -- return ret; -+ build_filter(s->filter, &s->fsets); - - memset(s->status, 0xAA, sizeof(s->status)); - memset(dsd, 0, frame->nb_samples * 4 * channels); -diff --git a/libavcodec/dxva2_av1.c b/libavcodec/dxva2_av1.c -index 8a912bf6c1..c30b57799c 100644 ---- a/libavcodec/dxva2_av1.c -+++ b/libavcodec/dxva2_av1.c -@@ -139,7 +139,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c - pp->frame_refs[i].Index = ref_frame->buf[0] ? ref_idx : 0xFF; - - /* Global Motion */ -- pp->frame_refs[i].wminvalid = h->cur_frame.gm_invalid[AV1_REF_FRAME_LAST + i]; -+ pp->frame_refs[i].wminvalid = (h->cur_frame.gm_type[AV1_REF_FRAME_LAST + i] == AV1_WARP_MODEL_IDENTITY); - pp->frame_refs[i].wmtype = h->cur_frame.gm_type[AV1_REF_FRAME_LAST + i]; - for (j = 0; j < 6; ++j) { - pp->frame_refs[i].wmmat[j] = h->cur_frame.gm_params[AV1_REF_FRAME_LAST + i][j]; + offset = endoffset; diff --git a/libavcodec/eac3dec.c b/libavcodec/eac3dec.c index 33b9c88bb2..3a5c7989b9 100644 --- a/libavcodec/eac3dec.c @@ -19526,7 +19413,7 @@ index 85f40a5c54..197ba6fc6e 100644 bytestream2_skip(&s->gb, mode); } else { diff --git a/libavcodec/escape124.c b/libavcodec/escape124.c -index 58278ecaa7..94c2a961e6 100644 +index 388fbaef47..94c2a961e6 100644 --- a/libavcodec/escape124.c +++ b/libavcodec/escape124.c @@ -88,6 +88,11 @@ static CodeBook unpack_codebook(GetBitContext* gb, unsigned depth, @@ -19541,6 +19428,15 @@ index 58278ecaa7..94c2a961e6 100644 cb.blocks = av_malloc(size ? size * sizeof(MacroBlock) : 1); if (!cb.blocks) return cb; +@@ -157,7 +162,7 @@ static MacroBlock decode_macroblock(Escape124Context* s, GetBitContext* gb, + + // This condition can occur with invalid bitstreams and + // *codebook_index == 2 +- if (block_index >= s->codebooks[*codebook_index].size || !s->codebooks[*codebook_index].blocks) ++ if (block_index >= s->codebooks[*codebook_index].size) + return (MacroBlock) { { 0 } }; + + return s->codebooks[*codebook_index].blocks[block_index]; @@ -221,7 +226,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, // represent a lower bound of the space needed for skipped superblocks. Non // skipped SBs need more space. 
@@ -19550,6 +19446,15 @@ index 58278ecaa7..94c2a961e6 100644 frame_flags = get_bits_long(&gb, 32); frame_size = get_bits_long(&gb, 32); +@@ -238,7 +243,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, + if ((ret = av_frame_ref(frame, s->frame)) < 0) + return ret; + +- return 0; ++ return frame_size; + } + + for (i = 0; i < 3; i++) { @@ -272,14 +277,9 @@ static int escape124_decode_frame(AVCodecContext *avctx, } @@ -19566,61 +19471,71 @@ index 58278ecaa7..94c2a961e6 100644 } } +@@ -372,7 +372,7 @@ static int escape124_decode_frame(AVCodecContext *avctx, + + *got_frame = 1; + +- return 0; ++ return frame_size; + } + + diff --git a/libavcodec/exr.c b/libavcodec/exr.c -index e3effad2e7..49ba7fd6de 100644 +index e3585e85fa..e3effad2e7 100644 --- a/libavcodec/exr.c +++ b/libavcodec/exr.c -@@ -1240,8 +1240,7 @@ static int decode_block(AVCodecContext *avctx, void *tdata, - td->ysize = FFMIN(s->tile_attr.ySize, s->ydelta - tile_y * s->tile_attr.ySize); - td->xsize = FFMIN(s->tile_attr.xSize, s->xdelta - tile_x * s->tile_attr.xSize); +@@ -333,10 +333,7 @@ static int huf_unpack_enc_table(GetByteContext *gb, + return ret; -- if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX || -- av_image_check_size2(td->xsize, td->ysize, s->avctx->max_pixels, AV_PIX_FMT_NONE, 0, s->avctx) < 0) -+ if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX) - return AVERROR_INVALIDDATA; + for (; im <= iM; im++) { +- int l; +- if (get_bits_left(&gbit) < 6) +- return AVERROR_INVALIDDATA; +- l = freq[im] = get_bits(&gbit, 6); ++ uint64_t l = freq[im] = get_bits(&gbit, 6); - td->channel_line_size = td->xsize * s->current_channel_offset;/* uncompress size of one line */ -@@ -1265,8 +1264,7 @@ static int decode_block(AVCodecContext *avctx, void *tdata, - td->ysize = FFMIN(s->scan_lines_per_block, s->ymax - line + 1); /* s->ydelta - line ?? 
*/ - td->xsize = s->xdelta; + if (l == LONG_ZEROCODE_RUN) { + int zerun = get_bits(&gbit, 8) + SHORTEST_LONG_RUN; +@@ -1942,27 +1939,21 @@ static int decode_header(EXRContext *s, AVFrame *frame) -- if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX || -- av_image_check_size2(td->xsize, td->ysize, s->avctx->max_pixels, AV_PIX_FMT_NONE, 0, s->avctx) < 0) -+ if (td->xsize * (uint64_t)s->current_channel_offset > INT_MAX) - return AVERROR_INVALIDDATA; - - td->channel_line_size = td->xsize * s->current_channel_offset;/* uncompress size of one line */ -@@ -1831,8 +1829,8 @@ static int decode_header(EXRContext *s, AVFrame *frame) - dx = bytestream2_get_le32(gb); - dy = bytestream2_get_le32(gb); - -- s->w = (unsigned)dx - sx + 1; -- s->h = (unsigned)dy - sy + 1; -+ s->w = dx - sx + 1; -+ s->h = dy - sy + 1; + bytestream2_get_buffer(gb, key, FFMIN(sizeof(key) - 1, var_size)); + if (strncmp("scanlineimage", key, var_size) && +- strncmp("tiledimage", key, var_size)) { +- ret = AVERROR_PATCHWELCOME; +- goto fail; +- } ++ strncmp("tiledimage", key, var_size)) ++ return AVERROR_PATCHWELCOME; continue; - } else if ((var_size = check_header_variable(s, "lineOrder", -@@ -1947,12 +1945,9 @@ static int decode_header(EXRContext *s, AVFrame *frame) + } else if ((var_size = check_header_variable(s, "preview", "preview", 16)) >= 0) { uint32_t pw = bytestream2_get_le32(gb); uint32_t ph = bytestream2_get_le32(gb); -- uint64_t psize = pw * ph; -- if (psize > INT64_MAX / 4) -- return AVERROR_INVALIDDATA; -- psize *= 4; -+ int64_t psize = 4LL * pw * ph; +- uint64_t psize = pw * (uint64_t)ph; +- if (psize > INT64_MAX / 4) { +- ret = AVERROR_INVALIDDATA; +- goto fail; +- } ++ uint64_t psize = pw * ph; ++ if (psize > INT64_MAX / 4) ++ return AVERROR_INVALIDDATA; + psize *= 4; -- if ((int64_t)psize >= bytestream2_get_bytes_left(gb)) -+ if (psize >= bytestream2_get_bytes_left(gb)) - return AVERROR_INVALIDDATA; +- if ((int64_t)psize >= bytestream2_get_bytes_left(gb)) { +- ret = AVERROR_INVALIDDATA; +- goto fail; +- } ++ if ((int64_t)psize >= bytestream2_get_bytes_left(gb)) ++ return AVERROR_INVALIDDATA; bytestream2_skip(gb, psize); + diff --git a/libavcodec/ffv1dec.c b/libavcodec/ffv1dec.c -index 82a9c20853..8516fef5d7 100644 +index 82a9c20853..5a365a5e31 100644 --- a/libavcodec/ffv1dec.c +++ b/libavcodec/ffv1dec.c -@@ -166,34 +166,24 @@ static int decode_slice_header(FFV1Context *f, FFV1Context *fs) +@@ -166,31 +166,24 @@ static int decode_slice_header(FFV1Context *f, FFV1Context *fs) RangeCoder *c = &fs->c; uint8_t state[CONTEXT_SIZE]; unsigned ps, i, context_count; @@ -19649,9 +19564,6 @@ index 82a9c20853..8516fef5d7 100644 - (unsigned)fs->slice_height <= f->height); - av_assert0 ( (unsigned)fs->slice_x + (uint64_t)fs->slice_width <= f->width - && (unsigned)fs->slice_y + (uint64_t)fs->slice_height <= f->height); -- -- if (fs->ac == AC_GOLOMB_RICE && fs->slice_width >= (1<<23)) -- return AVERROR_INVALIDDATA; + fs->slice_x = get_symbol(c, state, 0) * f->width ; + fs->slice_y = get_symbol(c, state, 0) * f->height; + fs->slice_width = (get_symbol(c, state, 0) + 1) * f->width + fs->slice_x; @@ -19667,9 +19579,9 @@ index 82a9c20853..8516fef5d7 100644 + || (unsigned)fs->slice_y + (uint64_t)fs->slice_height > f->height) + return -1; - for (i = 0; i < f->plane_count; i++) { - PlaneContext * const p = &fs->plane[i]; -@@ -308,11 +298,8 @@ static int decode_slice(AVCodecContext *c, void *arg) + if (fs->ac == AC_GOLOMB_RICE && fs->slice_width >= (1<<23)) + return AVERROR_INVALIDDATA; +@@ -308,11 +301,8 @@ static int 
decode_slice(AVCodecContext *c, void *arg) } if ((ret = ff_ffv1_init_slice_state(f, fs)) < 0) return ret; @@ -19682,7 +19594,7 @@ index 82a9c20853..8516fef5d7 100644 width = fs->slice_width; height = fs->slice_height; -@@ -475,11 +462,6 @@ static int read_extra_header(FFV1Context *f) +@@ -475,11 +465,6 @@ static int read_extra_header(FFV1Context *f) return AVERROR_INVALIDDATA; } @@ -19694,7 +19606,7 @@ index 82a9c20853..8516fef5d7 100644 f->quant_table_count = get_symbol(c, state, 0); if (f->quant_table_count > (unsigned)MAX_QUANT_TABLES || !f->quant_table_count) { av_log(f->avctx, AV_LOG_ERROR, "quant table count %d is invalid\n", f->quant_table_count); -@@ -782,25 +764,21 @@ static int read_header(FFV1Context *f) +@@ -782,25 +767,21 @@ static int read_header(FFV1Context *f) fs->slice_damaged = 0; if (f->version == 2) { @@ -19733,147 +19645,219 @@ index 82a9c20853..8516fef5d7 100644 } for (i = 0; i < f->plane_count; i++) { -diff --git a/libavcodec/ffv1dec_template.c b/libavcodec/ffv1dec_template.c -index 9b1d65e825..0b1d176ba1 100644 ---- a/libavcodec/ffv1dec_template.c -+++ b/libavcodec/ffv1dec_template.c -@@ -93,11 +93,11 @@ static av_always_inline int RENAME(decode_line)(FFV1Context *s, int w, - run_count--; - } - } else { -- while (run_count > 1 && w-x > 1) { -- sample[1][x] = RENAME(predict)(sample[1] + x, sample[0] + x); -- x++; -- run_count--; -- } -+ while (run_count > 1 && w-x > 1) { -+ sample[1][x] = RENAME(predict)(sample[1] + x, sample[0] + x); -+ x++; -+ run_count--; -+ } - } - run_count--; - if (run_count < 0) { +diff --git a/libavcodec/flac_parser.c b/libavcodec/flac_parser.c +index c423398139..b13b3b646a 100644 +--- a/libavcodec/flac_parser.c ++++ b/libavcodec/flac_parser.c +@@ -359,8 +359,6 @@ static int check_header_mismatch(FLACParseContext *fpc, + for (i = 0; i < FLAC_MAX_SEQUENTIAL_HEADERS && curr != child; i++) + curr = curr->next; + +- av_assert0(i < FLAC_MAX_SEQUENTIAL_HEADERS); +- + if (header->link_penalty[i] < FLAC_HEADER_CRC_FAIL_PENALTY || + header->link_penalty[i] == FLAC_HEADER_NOT_PENALIZED_YET) { + FLACHeaderMarker *start, *end; diff --git a/libavcodec/fmvc.c b/libavcodec/fmvc.c -index 82a2822e07..3701b0849b 100644 +index 76b1355da2..82a2822e07 100644 --- a/libavcodec/fmvc.c +++ b/libavcodec/fmvc.c -@@ -401,17 +401,20 @@ static int decode_frame(AVCodecContext *avctx, void *data, - PutByteContext *pb = &s->pb; - AVFrame *frame = data; - int ret, y, x; -- int key_frame; +@@ -100,6 +100,7 @@ static int decode_type2(GetByteContext *gb, PutByteContext *pb) + continue; + } + } ++ repeat = 0; + } + repeat = 1; + } +diff --git a/libavcodec/g2meet.c b/libavcodec/g2meet.c +index f852d74f6c..da910c1e59 100644 +--- a/libavcodec/g2meet.c ++++ b/libavcodec/g2meet.c +@@ -145,8 +145,7 @@ typedef struct G2MContext { + int got_header; - if (avpkt->size < 8) - return AVERROR_INVALIDDATA; + uint8_t *framebuf; +- int framebuf_stride; +- unsigned int framebuf_allocated; ++ int framebuf_stride, old_width, old_height; -+ if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) -+ return ret; -+ - bytestream2_init(gb, avpkt->data, avpkt->size); - bytestream2_skip(gb, 2); + uint8_t *synth_tile, *jpeg_tile, *epic_buf, *epic_buf_base; + int tile_stride, epic_buf_stride, old_tile_w, old_tile_h; +@@ -1161,13 +1160,14 @@ static int g2m_init_buffers(G2MContext *c) + { + int aligned_height; -- key_frame = !!bytestream2_get_le16(gb); -+ frame->key_frame = !!bytestream2_get_le16(gb); -+ frame->pict_type = frame->key_frame ? 
AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; +- c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3; +- aligned_height = c->height + 15; +- +- av_fast_mallocz(&c->framebuf, &c->framebuf_allocated, c->framebuf_stride * aligned_height); +- if (!c->framebuf) +- return AVERROR(ENOMEM); +- ++ if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) { ++ c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3; ++ aligned_height = c->height + 15; ++ av_free(c->framebuf); ++ c->framebuf = av_mallocz_array(c->framebuf_stride, aligned_height); ++ if (!c->framebuf) ++ return AVERROR(ENOMEM); ++ } + if (!c->synth_tile || !c->jpeg_tile || + (c->compression == 2 && !c->epic_buf_base) || + c->old_tile_w < c->tile_width || +@@ -1619,7 +1619,6 @@ static av_cold int g2m_decode_end(AVCodecContext *avctx) + av_freep(&c->jpeg_tile); + av_freep(&c->cursor); + av_freep(&c->framebuf); +- c->framebuf_allocated = 0; -- if (key_frame) { -+ if (frame->key_frame) { - const uint8_t *src; - unsigned type, size; - uint8_t *dst; -@@ -431,12 +434,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, - return AVERROR_PATCHWELCOME; + return 0; + } +diff --git a/libavcodec/g729postfilter.c b/libavcodec/g729postfilter.c +index 7ca569530a..617744ec8e 100644 +--- a/libavcodec/g729postfilter.c ++++ b/libavcodec/g729postfilter.c +@@ -350,7 +350,7 @@ static int16_t long_term_filter(AudioDSPContext *adsp, int pitch_delay_int, + if (tmp > 0) + L_temp0 >>= tmp; + else +- L_temp1 >>= FFMIN(-tmp, 31); ++ L_temp1 >>= -tmp; + + /* Check if longer filter increases the values of R'(k). */ + if (L_temp1 > L_temp0) { +@@ -578,7 +578,7 @@ void ff_g729_postfilter(AudioDSPContext *adsp, int16_t* ht_prev_data, int* voici + int16_t ff_g729_adaptive_gain_control(int gain_before, int gain_after, int16_t *speech, + int subframe_size, int16_t gain_prev) + { +- unsigned gain; // (3.12) ++ int gain; // (3.12) + int n; + int exp_before, exp_after; + +@@ -600,7 +600,7 @@ int16_t ff_g729_adaptive_gain_control(int gain_before, int gain_after, int16_t * + gain = ((gain_before - gain_after) << 14) / gain_after + 0x4000; + gain = bidir_sal(gain, exp_after - exp_before); } +- gain = FFMIN(gain, 32767); ++ gain = av_clip_int16(gain); + gain = (gain * G729_AGC_FAC1 + 0x4000) >> 15; // gain * (1-0.9875) + } else + gain = 0; +diff --git a/libavcodec/golomb.h b/libavcodec/golomb.h +index defee7139b..4d531cf805 100644 +--- a/libavcodec/golomb.h ++++ b/libavcodec/golomb.h +@@ -404,7 +404,6 @@ static inline int get_ur_golomb(GetBitContext *gb, int k, int limit, + log = av_log2(buf); -- if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) -- return ret; -- -- frame->key_frame = 1; -- frame->pict_type = AV_PICTURE_TYPE_I; -- - src = s->buffer; - dst = frame->data[0] + (avctx->height - 1) * frame->linesize[0]; - for (y = 0; y < avctx->height; y++) { -@@ -517,12 +514,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, - dst = &rect[block_h * s->stride]; - } + if (log > 31 - limit) { +- av_assert2(log >= k); + buf >>= log - k; + buf += (30U - log) << k; + LAST_SKIP_BITS(re, gb, 32 + k - log); +@@ -427,8 +426,6 @@ static inline int get_ur_golomb(GetBitContext *gb, int k, int limit, -- if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) -- return ret; -- -- frame->key_frame = 0; -- frame->pict_type = AV_PICTURE_TYPE_P; -- - ssrc = s->buffer; - ddst = frame->data[0] + (avctx->height - 1) * frame->linesize[0]; - for (y = 0; y < avctx->height; y++) { -diff --git a/libavcodec/g729_parser.c b/libavcodec/g729_parser.c -index ef08b48bf3..010f688104 100644 ---- 
a/libavcodec/g729_parser.c -+++ b/libavcodec/g729_parser.c -@@ -49,9 +49,6 @@ static int g729_parse(AVCodecParserContext *s1, AVCodecContext *avctx, - s->block_size = (avctx->bit_rate < 8000) ? G729D_6K4_BLOCK_SIZE : G729_8K_BLOCK_SIZE; - if (avctx->codec_id == AV_CODEC_ID_ACELP_KELVIN) - s->block_size++; -- // channels > 2 is invalid, we pass the packet on unchanged -- if (avctx->channels > 2) -- s->block_size = 0; - s->block_size *= avctx->channels; - s->duration = avctx->frame_size; - } -diff --git a/libavcodec/h263.h b/libavcodec/h263.h -index f5355e7ced..491f2e0aac 100644 ---- a/libavcodec/h263.h -+++ b/libavcodec/h263.h -@@ -100,16 +100,15 @@ void ff_h263_encode_motion(PutBitContext *pb, int val, int f_code); - - - static inline int h263_get_motion_length(int val, int f_code){ -- int bit_size, code, sign; -+ int l, bit_size, code; - - if (val == 0) { - return ff_mvtab[0][1]; - } else { - bit_size = f_code - 1; - /* modulo encoding */ -- val = sign_extend(val, 6 + bit_size); -- sign = val >> 31; -- val = (val ^ sign) - sign; /* val = FFABS(val) */ -+ l= INT_BIT - 6 - bit_size; -+ val = (val<>l; - val--; - code = (val >> bit_size) + 1; + /** + * read unsigned golomb rice code (jpegls). +- * +- * @returns -1 on error + */ + static inline int get_ur_golomb_jpegls(GetBitContext *gb, int k, int limit, + int esc_len) +@@ -540,8 +537,6 @@ static inline int get_sr_golomb(GetBitContext *gb, int k, int limit, + /** + * read signed golomb rice code (flac). +- * +- * @returns INT_MIN on error + */ + static inline int get_sr_golomb_flac(GetBitContext *gb, int k, int limit, + int esc_len) diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c -index f6f7789cef..e8b4d83e6e 100644 +index e3feb6dd65..f6f7789cef 100644 --- a/libavcodec/h263dec.c +++ b/libavcodec/h263dec.c -@@ -545,8 +545,6 @@ retry: - avctx->has_b_frames = !s->low_delay; +@@ -301,7 +301,7 @@ static int decode_slice(MpegEncContext *s) + ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y, + s->mb_x, s->mb_y, ER_MB_ERROR & part_mask); - if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4) { -- if (s->pict_type != AV_PICTURE_TYPE_B && s->mb_num/2 > get_bits_left(&s->gb)) -- return AVERROR_INVALIDDATA; - if (ff_mpeg4_workaround_bugs(avctx) == 1) - goto retry; - if (s->studio_profile != (s->idsp.idct == NULL)) -diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c -index bf3ab88da4..485f47d36e 100644 ---- a/libavcodec/h264dec.c -+++ b/libavcodec/h264dec.c -@@ -654,10 +654,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size) - avpriv_request_sample(avctx, "data partitioning"); - break; - case H264_NAL_SEI: -- if (h->setup_finished) { -- avpriv_request_sample(avctx, "Late SEI"); -- break; -- } - ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx); - h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1; - if (avctx->debug & FF_DEBUG_GREEN_MD) +- if ((s->avctx->err_recognition & AV_EF_IGNORE_ERR) && get_bits_left(&s->gb) > 0) ++ if (s->avctx->err_recognition & AV_EF_IGNORE_ERR) + continue; + return AVERROR_INVALIDDATA; + } +diff --git a/libavcodec/h2645_parse.h b/libavcodec/h2645_parse.h +index a3157adf5d..3e47f86c53 100644 +--- a/libavcodec/h2645_parse.h ++++ b/libavcodec/h2645_parse.h +@@ -122,7 +122,7 @@ static inline int get_nalsize(int nal_length_size, const uint8_t *buf, + + if (*buf_index >= buf_size - nal_length_size) { + // the end of the buffer is reached, refill it +- return AVERROR_INVALIDDATA; ++ return AVERROR(EAGAIN); + } + + for 
(i = 0; i < nal_length_size; i++) +diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c +index 7cb7733116..aacd44cf3b 100644 +--- a/libavcodec/h264_parser.c ++++ b/libavcodec/h264_parser.c +@@ -625,10 +625,10 @@ static int h264_parse(AVCodecParserContext *s, + int64_t num = avctx->time_base.num * (int64_t)avctx->pkt_timebase.den; + if (s->dts != AV_NOPTS_VALUE) { + // got DTS from the stream, update reference timestamp +- p->reference_dts = av_sat_sub64(s->dts, av_rescale(s->dts_ref_dts_delta, num, den)); ++ p->reference_dts = s->dts - av_rescale(s->dts_ref_dts_delta, num, den); + } else if (p->reference_dts != AV_NOPTS_VALUE) { + // compute DTS based on reference timestamp +- s->dts = av_sat_add64(p->reference_dts, av_rescale(s->dts_ref_dts_delta, num, den)); ++ s->dts = p->reference_dts + av_rescale(s->dts_ref_dts_delta, num, den); + } + + if (p->reference_dts != AV_NOPTS_VALUE && s->pts == AV_NOPTS_VALUE) +diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c +index 411fe619db..7c69016338 100644 +--- a/libavcodec/h264_slice.c ++++ b/libavcodec/h264_slice.c +@@ -1465,7 +1465,7 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl, + + sps = h->ps.sps; + +- if (sps->bitstream_restriction_flag && ++ if (sps && sps->bitstream_restriction_flag && + h->avctx->has_b_frames < sps->num_reorder_frames) { + h->avctx->has_b_frames = sps->num_reorder_frames; + } +diff --git a/libavcodec/hcadec.c b/libavcodec/hcadec.c +index 3e543c62cd..9c3eda21af 100644 +--- a/libavcodec/hcadec.c ++++ b/libavcodec/hcadec.c +@@ -110,7 +110,6 @@ static av_cold int decode_init(AVCodecContext *avctx) + float scale = 1.f / 8.f; + unsigned b, chunk; + int version, ret; +- unsigned hfr_group_count; + + avctx->sample_fmt = AV_SAMPLE_FMT_FLTP; + c->crc_table = av_crc_get_table(AV_CRC_16_ANSI); +@@ -234,12 +233,11 @@ static av_cold int decode_init(AVCodecContext *avctx) + if (c->total_band_count < c->base_band_count) + return AVERROR_INVALIDDATA; + +- hfr_group_count = ceil2(c->total_band_count - (c->base_band_count + c->stereo_band_count), ++ c->hfr_group_count = ceil2(c->total_band_count - (c->base_band_count + c->stereo_band_count), + c->bands_per_hfr_group); + +- if (c->base_band_count + c->stereo_band_count + (uint64_t)hfr_group_count > 128ULL) ++ if (c->base_band_count + c->stereo_band_count + (unsigned long)c->hfr_group_count > 128ULL) + return AVERROR_INVALIDDATA; +- c->hfr_group_count = hfr_group_count; + + for (int i = 0; i < avctx->channels; i++) { + c->ch[i].chan_type = r[i]; diff --git a/libavcodec/hevc-ctrls-v1.h b/libavcodec/hevc-ctrls-v1.h new file mode 100644 index 0000000000..72cbba0953 @@ -21163,34 +21147,6 @@ index 0000000000..c02fdbe5a8 +}; + +#endif -diff --git a/libavcodec/hevc_filter.c b/libavcodec/hevc_filter.c -index a45cb6f0fb..6b9824088c 100644 ---- a/libavcodec/hevc_filter.c -+++ b/libavcodec/hevc_filter.c -@@ -145,22 +145,11 @@ int i, j; - - if (((intptr_t)dst | (intptr_t)src | stride_dst | stride_src) & 15) { - for (i = 0; i < height; i++) { -- for (j = 0; j < width - 7; j+=8) -+ for (j = 0; j < width; j+=8) - AV_COPY64U(dst+j, src+j); - dst += stride_dst; - src += stride_src; - } -- if (width&7) { -- dst += ((width>>3)<<3) - stride_dst * height; -- src += ((width>>3)<<3) - stride_src * height; -- width &= 7; -- for (i = 0; i < height; i++) { -- for (j = 0; j < width; j++) -- dst[j] = src[j]; -- dst += stride_dst; -- src += stride_src; -- } -- } - } else { - for (i = 0; i < height; i++) { - for (j = 0; j < width; j+=16) diff --git 
a/libavcodec/hevc_parser.c b/libavcodec/hevc_parser.c index 463d352055..7feff43c28 100644 --- a/libavcodec/hevc_parser.c @@ -21275,7 +21231,7 @@ index 4f6d985ae6..eefae71275 100644 return 0; } diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c -index 19d6d517f3..7b05b41441 100644 +index 273eecca5b..fc5ce28718 100644 --- a/libavcodec/hevcdec.c +++ b/libavcodec/hevcdec.c @@ -333,6 +333,19 @@ static void export_stream_params(HEVCContext *s, const HEVCSPS *sps) @@ -21362,7 +21318,72 @@ index 19d6d517f3..7b05b41441 100644 ret = pic_arrays_init(s, sps); if (ret < 0) goto fail; -@@ -2901,11 +2939,13 @@ static int hevc_frame_start(HEVCContext *s) +@@ -609,10 +647,6 @@ static int hls_slice_header(HEVCContext *s) + + if (s->ps.pps->dependent_slice_segments_enabled_flag) + sh->dependent_slice_segment_flag = get_bits1(gb); +- if (sh->dependent_slice_segment_flag && !s->slice_initialized) { +- av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n"); +- return AVERROR_INVALIDDATA; +- } + + slice_address_length = av_ceil_log2(s->ps.sps->ctb_width * + s->ps.sps->ctb_height); +@@ -881,6 +915,9 @@ static int hls_slice_header(HEVCContext *s) + } else { + sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag; + } ++ } else if (!s->slice_initialized) { ++ av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n"); ++ return AVERROR_INVALIDDATA; + } + + sh->num_entry_point_offsets = 0; +@@ -1499,8 +1536,7 @@ static void luma_mc_uni(HEVCContext *s, uint8_t *dst, ptrdiff_t dststride, + + if (x_off < QPEL_EXTRA_BEFORE || y_off < QPEL_EXTRA_AFTER || + x_off >= pic_width - block_w - QPEL_EXTRA_AFTER || +- y_off >= pic_height - block_h - QPEL_EXTRA_AFTER || +- ref == s->frame) { ++ y_off >= pic_height - block_h - QPEL_EXTRA_AFTER) { + const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; + int offset = QPEL_EXTRA_BEFORE * srcstride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); + int buf_offset = QPEL_EXTRA_BEFORE * edge_emu_stride + (QPEL_EXTRA_BEFORE << s->ps.sps->pixel_shift); +@@ -1648,7 +1684,6 @@ static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0, + intptr_t my = av_mod_uintp2(mv->y, 2 + vshift); + intptr_t _mx = mx << (1 - hshift); + intptr_t _my = my << (1 - vshift); +- int emu = src0 == s->frame->data[1] || src0 == s->frame->data[2]; + + x_off += mv->x >> (2 + hshift); + y_off += mv->y >> (2 + vshift); +@@ -1656,8 +1691,7 @@ static void chroma_mc_uni(HEVCContext *s, uint8_t *dst0, + + if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER || + x_off >= pic_width - block_w - EPEL_EXTRA_AFTER || +- y_off >= pic_height - block_h - EPEL_EXTRA_AFTER || +- emu) { ++ y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) { + const int edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift; + int offset0 = EPEL_EXTRA_BEFORE * (srcstride + (1 << s->ps.sps->pixel_shift)); + int buf_offset0 = EPEL_EXTRA_BEFORE * +@@ -1896,13 +1930,13 @@ static void hls_prediction_unit(HEVCContext *s, int x0, int y0, + + if (current_mv.pred_flag & PF_L0) { + ref0 = refPicList[0].ref[current_mv.ref_idx[0]]; +- if (!ref0 || !ref0->frame) ++ if (!ref0) + return; + hevc_await_progress(s, ref0, ¤t_mv.mv[0], y0, nPbH); + } + if (current_mv.pred_flag & PF_L1) { + ref1 = refPicList[1].ref[current_mv.ref_idx[1]]; +- if (!ref1 || !ref1->frame) ++ if (!ref1) + return; + hevc_await_progress(s, ref1, ¤t_mv.mv[1], y0, nPbH); + } +@@ -2905,11 +2939,13 @@ static int hevc_frame_start(HEVCContext *s) ((s->ps.sps->height >> 
s->ps.sps->log2_min_cb_size) + 1); int ret; @@ -21381,7 +21402,20 @@ index 19d6d517f3..7b05b41441 100644 s->is_decoded = 0; s->first_nal_type = s->nal_unit_type; -@@ -3327,7 +3367,14 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, +@@ -3039,11 +3075,8 @@ static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal) + case HEVC_NAL_RASL_N: + case HEVC_NAL_RASL_R: + ret = hls_slice_header(s); +- if (ret < 0) { +- // hls_slice_header() does not cleanup on failure thus the state now is inconsistant so we cannot use it on depandant slices +- s->slice_initialized = 0; ++ if (ret < 0) + return ret; +- } + if (ret == 1) { + ret = AVERROR_INVALIDDATA; + goto fail; +@@ -3334,7 +3367,14 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, s->ref = NULL; ret = decode_nal_units(s, avpkt->data, avpkt->size); if (ret < 0) @@ -21396,16 +21430,7 @@ index 19d6d517f3..7b05b41441 100644 if (avctx->hwaccel) { if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) { -@@ -3338,7 +3385,7 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, - } - } else { - /* verify the SEI checksum */ -- if (avctx->err_recognition & AV_EF_CRCCHECK && s->ref && s->is_decoded && -+ if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded && - s->sei.picture_hash.is_md5) { - ret = verify_md5(s, s->ref->frame); - if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) { -@@ -3370,15 +3417,19 @@ static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src) +@@ -3377,15 +3417,19 @@ static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src) if (ret < 0) return ret; @@ -21433,7 +21458,7 @@ index 19d6d517f3..7b05b41441 100644 dst->rpl_buf = av_buffer_ref(src->rpl_buf); if (!dst->rpl_buf) -@@ -3697,6 +3748,15 @@ AVCodec ff_hevc_decoder = { +@@ -3704,6 +3748,15 @@ AVCodec ff_hevc_decoder = { #if CONFIG_HEVC_NVDEC_HWACCEL HWACCEL_NVDEC(hevc), #endif @@ -21449,18 +21474,30 @@ index 19d6d517f3..7b05b41441 100644 #if CONFIG_HEVC_VAAPI_HWACCEL HWACCEL_VAAPI(hevc), #endif -diff --git a/libavcodec/hevcdsp_template.c b/libavcodec/hevcdsp_template.c -index 61425975cd..56cd9e605d 100644 ---- a/libavcodec/hevcdsp_template.c -+++ b/libavcodec/hevcdsp_template.c -@@ -313,7 +313,7 @@ static void FUNC(sao_band_filter)(uint8_t *_dst, uint8_t *_src, - offset_table[(k + sao_left_class) & 31] = sao_offset_val[k + 1]; - for (y = 0; y < height; y++) { - for (x = 0; x < width; x++) -- dst[x] = av_clip_pixel(src[x] + offset_table[(src[x] >> shift) & 31]); -+ dst[x] = av_clip_pixel(src[x] + offset_table[src[x] >> shift]); - dst += stride_dst; - src += stride_src; +diff --git a/libavcodec/huffyuvdec.c b/libavcodec/huffyuvdec.c +index 1cc4abe406..e713b91e4d 100644 +--- a/libavcodec/huffyuvdec.c ++++ b/libavcodec/huffyuvdec.c +@@ -662,9 +662,9 @@ static void decode_422_bitstream(HYuvContext *s, int count) + /* TODO instead of restarting the read when the code isn't in the first level + * of the joint table, jump into the 2nd level of the individual table. 
*/ + #define READ_2PIX_PLANE16(dst0, dst1, plane){\ +- dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)*4;\ ++ dst0 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\ + dst0 += get_bits(&s->gb, 2);\ +- dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)*4;\ ++ dst1 = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2;\ + dst1 += get_bits(&s->gb, 2);\ + } + static void decode_plane_bitstream(HYuvContext *s, int width, int plane) +@@ -722,7 +722,7 @@ static void decode_plane_bitstream(HYuvContext *s, int width, int plane) + } + } + if( width&1 && get_bits_left(&s->gb)>0 ) { +- int dst = (unsigned)get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2; ++ int dst = get_vlc2(&s->gb, s->vlc[plane].table, VLC_BITS, 3)<<2; + s->temp16[0][width-1] = dst + get_bits(&s->gb, 2); + } } diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h index 8e54cf73f9..2277aadf75 100644 @@ -21501,305 +21538,523 @@ index f421dc909f..f93283b893 100644 #define HWACCEL_VAAPI(codec) \ HW_CONFIG_HWACCEL(1, 1, 1, VAAPI, VAAPI, ff_ ## codec ## _vaapi_hwaccel) #define HWACCEL_VDPAU(codec) \ +diff --git a/libavcodec/iff.c b/libavcodec/iff.c +index 629c58dfbd..76d3696bb3 100644 +--- a/libavcodec/iff.c ++++ b/libavcodec/iff.c +@@ -583,7 +583,7 @@ static int decode_byterun2(uint8_t *dst, int height, int line_size, + GetByteContext *gb) + { + GetByteContext cmds; +- int count; ++ unsigned count; + int i, y_pos = 0, x_pos = 0; + + if (bytestream2_get_be32(gb) != MKBETAG('V', 'D', 'A', 'T')) +@@ -591,7 +591,7 @@ static int decode_byterun2(uint8_t *dst, int height, int line_size, + + bytestream2_skip(gb, 4); + count = bytestream2_get_be16(gb) - 2; +- if (count < 0 || bytestream2_get_bytes_left(gb) < count) ++ if (bytestream2_get_bytes_left(gb) < count) + return 0; + + bytestream2_init(&cmds, gb->buffer, count); +diff --git a/libavcodec/ilbcdec.c b/libavcodec/ilbcdec.c +index 9961bab985..33f4e2c1eb 100644 +--- a/libavcodec/ilbcdec.c ++++ b/libavcodec/ilbcdec.c +@@ -1092,6 +1092,12 @@ static void do_plc(int16_t *plc_residual, /* (o) concealed residual */ + + if (s->consPLICount * s->block_samples > 320) { + use_gain = 29491; /* 0.9 in Q15 */ ++ } else if (s->consPLICount * s->block_samples > 640) { ++ use_gain = 22938; /* 0.7 in Q15 */ ++ } else if (s->consPLICount * s->block_samples > 960) { ++ use_gain = 16384; /* 0.5 in Q15 */ ++ } else if (s->consPLICount * s->block_samples > 1280) { ++ use_gain = 0; /* 0.0 in Q15 */ + } + + /* Compute mixing factor of picth repeatition and noise: +diff --git a/libavcodec/imm4.c b/libavcodec/imm4.c +index 6f6858864d..636130dd54 100644 +--- a/libavcodec/imm4.c ++++ b/libavcodec/imm4.c +@@ -220,15 +220,12 @@ static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame + + for (y = 0; y < avctx->height; y += 16) { + for (x = 0; x < avctx->width; x += 16) { +- unsigned flag, cbplo; +- int cbphi; ++ unsigned flag, cbphi, cbplo; + + cbplo = get_vlc2(gb, cbplo_tab.table, CBPLO_VLC_BITS, 1); + flag = get_bits1(gb); + + cbphi = get_cbphi(gb, 1); +- if (cbphi < 0) +- return cbphi; + + ret = decode_blocks(avctx, gb, cbplo | (cbphi << 2), 0, offset, flag); + if (ret < 0) +@@ -276,8 +273,7 @@ static int decode_inter(AVCodecContext *avctx, GetBitContext *gb, + for (y = 0; y < avctx->height; y += 16) { + for (x = 0; x < avctx->width; x += 16) { + int reverse, intra_block, value; +- unsigned cbplo, flag2 = 0; +- int cbphi; ++ unsigned cbphi, cbplo, flag2 = 0; + + if (get_bits1(gb)) { + copy_block16(frame->data[0] + y * frame->linesize[0] + x, +@@ -303,9 +299,6 @@ 
static int decode_inter(AVCodecContext *avctx, GetBitContext *gb, + + cbplo = value >> 4; + cbphi = get_cbphi(gb, reverse); +- if (cbphi < 0) +- return cbphi; +- + if (intra_block) { + ret = decode_blocks(avctx, gb, cbplo | (cbphi << 2), 0, offset, flag2); + if (ret < 0) +diff --git a/libavcodec/indeo3.c b/libavcodec/indeo3.c +index 134e1647fa..5257d983c2 100644 +--- a/libavcodec/indeo3.c ++++ b/libavcodec/indeo3.c +@@ -169,9 +169,6 @@ static av_cold int allocate_frame_buffers(Indeo3DecodeContext *ctx, + int luma_size, chroma_size; + ptrdiff_t luma_pitch, chroma_pitch; + +- luma_width = FFALIGN(luma_width , 2); +- luma_height = FFALIGN(luma_height, 2); +- + if (luma_width < 16 || luma_width > 640 || + luma_height < 16 || luma_height > 480 || + luma_width & 1 || luma_height & 1) { +diff --git a/libavcodec/j2kenc.c b/libavcodec/j2kenc.c +index b776e275f7..212b9601c4 100644 +--- a/libavcodec/j2kenc.c ++++ b/libavcodec/j2kenc.c +@@ -719,10 +719,11 @@ static void encode_cblk(Jpeg2000EncoderContext *s, Jpeg2000T1Context *t1, Jpeg20 + + if (max == 0){ + cblk->nonzerobits = 0; ++ bpno = 0; + } else{ + cblk->nonzerobits = av_log2(max) + 1 - NMSEDEC_FRACBITS; ++ bpno = cblk->nonzerobits - 1; + } +- bpno = cblk->nonzerobits - 1; + + cblk->data[0] = 0; + ff_mqc_initenc(&t1->mqc, cblk->data + 1); +@@ -1530,7 +1531,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + int tileno, ret; + Jpeg2000EncoderContext *s = avctx->priv_data; + uint8_t *chunkstart, *jp2cstart, *jp2hstart; +- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); + + if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width*avctx->height*9 + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0) + return ret; +@@ -1543,7 +1543,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + + s->lambda = s->picture->quality * LAMBDA_SCALE; + +- if (s->cbps[0] > 8) ++ if (avctx->pix_fmt == AV_PIX_FMT_BGR48 || avctx->pix_fmt == AV_PIX_FMT_GRAY16) + copy_frame_16(s); + else + copy_frame_8(s); +@@ -1587,7 +1587,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt, + bytestream_put_byte(&s->buf, 1); + bytestream_put_byte(&s->buf, 0); + bytestream_put_byte(&s->buf, 0); +- if ((desc->flags & AV_PIX_FMT_FLAG_RGB) || avctx->pix_fmt == AV_PIX_FMT_PAL8) { ++ if (avctx->pix_fmt == AV_PIX_FMT_RGB24 || avctx->pix_fmt == AV_PIX_FMT_PAL8) { + bytestream_put_be32(&s->buf, 16); + } else if (s->ncomponents == 1) { + bytestream_put_be32(&s->buf, 17); +@@ -1717,7 +1717,6 @@ static av_cold int j2kenc_init(AVCodecContext *avctx) + Jpeg2000EncoderContext *s = avctx->priv_data; + Jpeg2000CodingStyle *codsty = &s->codsty; + Jpeg2000QuantStyle *qntsty = &s->qntsty; +- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt); + + s->avctx = avctx; + av_log(s->avctx, AV_LOG_DEBUG, "init\n"); +@@ -1737,7 +1736,7 @@ FF_ENABLE_DEPRECATION_WARNINGS + + if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && (s->pred != FF_DWT97_INT || s->format != CODEC_JP2)) { + av_log(s->avctx, AV_LOG_WARNING, "Forcing lossless jp2 for pal8\n"); +- s->pred = 1; ++ s->pred = FF_DWT97_INT; + s->format = CODEC_JP2; + } + +@@ -1767,13 +1766,20 @@ FF_ENABLE_DEPRECATION_WARNINGS + s->width = avctx->width; + s->height = avctx->height; + +- s->ncomponents = desc->nb_components; + for (i = 0; i < 3; i++) { +- s->cbps[i] = desc->comp[i].depth; ++ if (avctx->pix_fmt == AV_PIX_FMT_GRAY16 || avctx->pix_fmt == AV_PIX_FMT_RGB48) ++ s->cbps[i] = 16; ++ else ++ s->cbps[i] = 8; + } + +- if ((desc->flags & AV_PIX_FMT_FLAG_PLANAR) && s->ncomponents > 1) { ++ if (avctx->pix_fmt == 
AV_PIX_FMT_RGB24 || avctx->pix_fmt == AV_PIX_FMT_RGB48){ ++ s->ncomponents = 3; ++ } else if (avctx->pix_fmt == AV_PIX_FMT_GRAY8 || avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY16){ ++ s->ncomponents = 1; ++ } else{ // planar YUV + s->planar = 1; ++ s->ncomponents = 3; + ret = av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, + s->chroma_shift, s->chroma_shift + 1); + if (ret) +@@ -1813,7 +1819,7 @@ static const AVOption options[] = { + { "tile_height", "Tile Height", OFFSET(tile_height), AV_OPT_TYPE_INT, { .i64 = 256 }, 1, 1<<30, VE, }, + { "pred", "DWT Type", OFFSET(pred), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE, "pred" }, + { "dwt97int", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "pred" }, +- { "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, INT_MIN, INT_MAX, VE, "pred" }, ++ { "dwt53", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, INT_MIN, INT_MAX, VE, "pred" }, + { "sop", "SOP marker", OFFSET(sop), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE, }, + { "eph", "EPH marker", OFFSET(eph), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE, }, + { "prog", "Progression Order", OFFSET(prog), AV_OPT_TYPE_INT, { .i64 = 0 }, JPEG2000_PGOD_LRCP, JPEG2000_PGOD_CPRL, VE, "prog" }, +diff --git a/libavcodec/jfdctint_template.c b/libavcodec/jfdctint_template.c +index 2f4e28b3a3..67fb77b5e1 100644 +--- a/libavcodec/jfdctint_template.c ++++ b/libavcodec/jfdctint_template.c +@@ -69,7 +69,7 @@ + #define GLOBAL(x) x + #define RIGHT_SHIFT(x, n) ((x) >> (n)) + #define MULTIPLY16C16(var,const) ((var)*(const)) +-#define DESCALE(x,n) RIGHT_SHIFT((int)(x) + (1 << ((n) - 1)), n) ++#define DESCALE(x,n) RIGHT_SHIFT((x) + (1 << ((n) - 1)), n) + + + /* +@@ -175,7 +175,7 @@ + #if BITS_IN_JSAMPLE == 8 && CONST_BITS<=13 && PASS1_BITS<=2 + #define MULTIPLY(var,const) MULTIPLY16C16(var,const) + #else +-#define MULTIPLY(var,const) (int)((var) * (unsigned)(const)) ++#define MULTIPLY(var,const) ((var) * (const)) + #endif + + +@@ -261,7 +261,7 @@ FUNC(ff_jpeg_fdct_islow)(int16_t *data) + { + int tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; + int tmp10, tmp11, tmp12, tmp13; +- unsigned z1, z2, z3, z4, z5; ++ int z1, z2, z3, z4, z5; + int16_t *dataptr; + int ctr; + +diff --git a/libavcodec/jpeg2000dec.c b/libavcodec/jpeg2000dec.c +index 9b3654dbc6..0d7ade5ce8 100644 +--- a/libavcodec/jpeg2000dec.c ++++ b/libavcodec/jpeg2000dec.c +@@ -323,16 +323,6 @@ static int get_siz(Jpeg2000DecoderContext *s) + return AVERROR_INVALIDDATA; + } + +- if (s->image_offset_x >= s->width || s->image_offset_y >= s->height) { +- av_log(s->avctx, AV_LOG_ERROR, "image offsets outside image"); +- return AVERROR_INVALIDDATA; +- } +- +- if (s->reduction_factor && (s->image_offset_x || s->image_offset_y) ){ +- av_log(s->avctx, AV_LOG_ERROR, "reduction factor with image offsets is not fully implemented"); +- return AVERROR_PATCHWELCOME; +- } +- + s->ncomponents = ncomponents; + + if (s->tile_width <= 0 || s->tile_height <= 0) { +@@ -911,6 +901,9 @@ static int get_tlm(Jpeg2000DecoderContext *s, int n) + case 2: + bytestream2_get_be16(&s->g); + break; ++ case 3: ++ bytestream2_get_be32(&s->g); ++ break; + } + if (SP == 0) { + bytestream2_get_be16(&s->g); diff --git a/libavcodec/jpeglsdec.c b/libavcodec/jpeglsdec.c -index fe0b3c3c40..c4ffa81f7d 100644 +index 1bc94c5070..fe0b3c3c40 100644 --- a/libavcodec/jpeglsdec.c +++ b/libavcodec/jpeglsdec.c -@@ -67,7 +67,7 @@ int ff_jpegls_decode_lse(MJpegDecodeContext *s) - s->t3 = get_bits(&s->gb, 16); - s->reset = get_bits(&s->gb, 16); - -- if (s->avctx->debug & FF_DEBUG_PICT_INFO) { -+ 
if(s->avctx->debug & FF_DEBUG_PICT_INFO) { - av_log(s->avctx, AV_LOG_DEBUG, "Coding parameters maxval:%d T1:%d T2:%d T3:%d reset:%d\n", - s->maxval, s->t1, s->t2, s->t3, s->reset); - } -@@ -96,7 +96,7 @@ int ff_jpegls_decode_lse(MJpegDecodeContext *s) - else - maxtab = 65530/wt - 1; - -- if (s->avctx->debug & FF_DEBUG_PICT_INFO) { -+ if(s->avctx->debug & FF_DEBUG_PICT_INFO) { - av_log(s->avctx, AV_LOG_DEBUG, "LSE palette %d tid:%d wt:%d maxtab:%d\n", id, tid, wt, maxtab); - } - if (maxtab >= 256) { -@@ -186,7 +186,7 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, - if (RItype) - temp += state->N[Q] >> 1; - -- for (k = 0; ((unsigned)state->N[Q] << k) < temp; k++) -+ for (k = 0; (state->N[Q] << k) < temp; k++) - ; - - #ifdef JLS_BROKEN -@@ -195,8 +195,6 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, - #endif - ret = get_ur_golomb_jpegls(gb, k, state->limit - limit_add - 1, - state->qbpp); -- if (ret < 0) -- return -0x10000; - - /* decode mapped error */ - map = 0; -@@ -211,7 +209,7 @@ static inline int ls_get_code_runterm(GetBitContext *gb, JLSState *state, - ret = ret >> 1; - } - -- if (FFABS(ret) > 0xFFFF) -+ if(FFABS(ret) > 0xFFFF) - return -0x10000; - /* update state */ - state->A[Q] += FFABS(ret) - RItype; -@@ -478,19 +476,19 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, - for (i = 0; i < s->height; i++) { - switch(s->xfrm) { - case 1: -- for (x = off; x + 2 < w; x += 3) { -+ for (x = off; x < w; x += 3) { - src[x ] += src[x+1] + 128; - src[x+2] += src[x+1] + 128; - } - break; - case 2: -- for (x = off; x + 2 < w; x += 3) { -+ for (x = off; x < w; x += 3) { - src[x ] += src[x+1] + 128; - src[x+2] += ((src[x ] + src[x+1])>>1) + 128; - } - break; - case 3: -- for (x = off; x + 2 < w; x += 3) { -+ for (x = off; x < w; x += 3) { - int g = src[x+0] - ((src[x+2]+src[x+1])>>2) + 64; - src[x+0] = src[x+2] + g + 128; - src[x+2] = src[x+1] + g + 128; -@@ -498,7 +496,7 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, - } - break; - case 4: -- for (x = off; x + 2 < w; x += 3) { -+ for (x = off; x < w; x += 3) { - int r = src[x+0] - (( 359 * (src[x+2]-128) + 490) >> 8); - int g = src[x+0] - (( 88 * (src[x+1]-128) - 183 * (src[x+2]-128) + 30) >> 8); - int b = src[x+0] + ((454 * (src[x+1]-128) + 574) >> 8); -diff --git a/libavcodec/lagarith.c b/libavcodec/lagarith.c -index 1b08e9308e..d81e55cf4c 100644 ---- a/libavcodec/lagarith.c -+++ b/libavcodec/lagarith.c -@@ -408,9 +408,6 @@ output_zeros: - if (zero_run) { - zero_run = 0; - i += esc_count; -- if (i > end - dst || -- i >= src_end - src) -- return AVERROR_INVALIDDATA; - memcpy(dst, src, i); - dst += i; - l->zeros_rem = lag_calc_zero_run(src[i]); -diff --git a/libavcodec/libdav1d.c b/libavcodec/libdav1d.c -index a9b57f526d..a9c983eaca 100644 ---- a/libavcodec/libdav1d.c -+++ b/libavcodec/libdav1d.c -@@ -127,11 +127,7 @@ static av_cold int libdav1d_init(AVCodecContext *c) - { - Libdav1dContext *dav1d = c->priv_data; - Dav1dSettings s; --#if FF_DAV1D_VERSION_AT_LEAST(6,0) -- int threads = c->thread_count; --#else - int threads = (c->thread_count ? c->thread_count : av_cpu_count()) * 3 / 2; --#endif - int res; - - av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_version()); -@@ -157,7 +153,7 @@ static av_cold int libdav1d_init(AVCodecContext *c) - s.n_threads = FFMAX(dav1d->frame_threads, dav1d->tile_threads); - else - s.n_threads = FFMIN(threads, DAV1D_MAX_THREADS); -- s.max_frame_delay = (c->flags & AV_CODEC_FLAG_LOW_DELAY) ? 
1 : 0; -+ s.max_frame_delay = (c->flags & AV_CODEC_FLAG_LOW_DELAY) ? 1 : s.n_threads; - av_log(c, AV_LOG_DEBUG, "Using %d threads, %d max_frame_delay\n", - s.n_threads, s.max_frame_delay); - #else -@@ -248,10 +244,8 @@ static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame) - if (res < 0) { - if (res == AVERROR(EINVAL)) - res = AVERROR_INVALIDDATA; -- if (res != AVERROR(EAGAIN)) { -- dav1d_data_unref(data); -+ if (res != AVERROR(EAGAIN)) - return res; -- } - } - - res = dav1d_get_picture(dav1d->c, p); -diff --git a/libavcodec/libkvazaar.c b/libavcodec/libkvazaar.c -index 7389265415..4432649853 100644 ---- a/libavcodec/libkvazaar.c -+++ b/libavcodec/libkvazaar.c -@@ -210,19 +210,13 @@ static int libkvazaar_encode(AVCodecContext *avctx, - - // Copy pixels from frame to input_pic. - { -- uint8_t *dst[4] = { -- input_pic->data[0], -- input_pic->data[1], -- input_pic->data[2], -- NULL, -- }; - int dst_linesizes[4] = { - frame->width, - frame->width / 2, - frame->width / 2, - 0 - }; -- av_image_copy(dst, dst_linesizes, -+ av_image_copy(input_pic->data, dst_linesizes, - (const uint8_t **)frame->data, frame->linesize, - frame->format, frame->width, frame->height); - } -diff --git a/libavcodec/libopenh264dec.c b/libavcodec/libopenh264dec.c -index dcd781dd84..c7aa7fa19c 100644 ---- a/libavcodec/libopenh264dec.c -+++ b/libavcodec/libopenh264dec.c -@@ -91,8 +91,8 @@ static int svc_decode_frame(AVCodecContext *avctx, void *data, - { - SVCContext *s = avctx->priv_data; - SBufferInfo info = { 0 }; -- uint8_t *ptrs[4] = { NULL }; -- int ret, linesize[4]; -+ uint8_t* ptrs[3]; -+ int ret, linesize[3]; - AVFrame *avframe = data; - DECODING_STATE state; - #if OPENH264_VER_AT_LEAST(1, 7) -@@ -140,7 +140,6 @@ static int svc_decode_frame(AVCodecContext *avctx, void *data, - - linesize[0] = info.UsrData.sSystemBuffer.iStride[0]; - linesize[1] = linesize[2] = info.UsrData.sSystemBuffer.iStride[1]; -- linesize[3] = 0; - av_image_copy(avframe->data, avframe->linesize, (const uint8_t **) ptrs, linesize, avctx->pix_fmt, avctx->width, avctx->height); - - avframe->pts = info.uiOutYuvTimeStamp; -diff --git a/libavcodec/libuavs3d.c b/libavcodec/libuavs3d.c -index 59b50a2843..be03da39e2 100644 ---- a/libavcodec/libuavs3d.c -+++ b/libavcodec/libuavs3d.c -@@ -208,9 +208,7 @@ static int libuavs3d_decode_frame(AVCodecContext *avctx, void *data, int *got_fr - } - avctx->has_b_frames = !seqh->low_delay; - avctx->pix_fmt = seqh->bit_depth_internal == 8 ? 
AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUV420P10LE; -- ret = ff_set_dimensions(avctx, seqh->horizontal_size, seqh->vertical_size); -- if (ret < 0) -- return ret; -+ ff_set_dimensions(avctx, seqh->horizontal_size, seqh->vertical_size); - h->got_seqhdr = 1; - - if (seqh->colour_description) { -diff --git a/libavcodec/libxavs2.c b/libavcodec/libxavs2.c -index f33240f300..2a4a3e36bd 100644 ---- a/libavcodec/libxavs2.c -+++ b/libavcodec/libxavs2.c -@@ -205,7 +205,7 @@ static int xavs2_encode_frame(AVCodecContext *avctx, AVPacket *pkt, - ret = cae->api->encoder_encode(cae->encoder, &pic, &cae->packet); - - if (ret) { -- av_log(avctx, AV_LOG_ERROR, "Encoding error occurred.\n"); -+ av_log(avctx, AV_LOG_ERROR, "Encoding error occured.\n"); - return AVERROR_EXTERNAL; - } - -diff --git a/libavcodec/midivid.c b/libavcodec/midivid.c -index 3e6a9ca3d9..2200440e2c 100644 ---- a/libavcodec/midivid.c -+++ b/libavcodec/midivid.c -@@ -202,7 +202,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, - bytestream2_skip(gb, 8); - uncompressed = bytestream2_get_le32(gb); - -- if (!uncompressed) { -+ if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0) -+ return ret; -+ -+ if (uncompressed) { -+ ret = decode_mvdv(s, avctx, frame); -+ } else { - av_fast_padded_malloc(&s->uncompressed, &s->uncompressed_size, 16LL * (avpkt->size - 12)); - if (!s->uncompressed) - return AVERROR(ENOMEM); -@@ -211,13 +216,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, - if (ret < 0) - return ret; - bytestream2_init(gb, s->uncompressed, ret); -+ ret = decode_mvdv(s, avctx, frame); - } - -- if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0) -- return ret; +@@ -376,19 +376,6 @@ int ff_jpegls_decode_picture(MJpegDecodeContext *s, int near, + state->T3 = s->t3; + state->reset = s->reset; + ff_jpegls_reset_coding_parameters(state, 0); - -- ret = decode_mvdv(s, avctx, frame); -- - if (ret < 0) - return ret; - key = ret; -diff --git a/libavcodec/mjpegbdec.c b/libavcodec/mjpegbdec.c -index 3fab4a66bc..19875a2ddb 100644 ---- a/libavcodec/mjpegbdec.c -+++ b/libavcodec/mjpegbdec.c -@@ -57,7 +57,6 @@ static int mjpegb_decode_frame(AVCodecContext *avctx, - buf_end = buf + buf_size; - s->got_picture = 0; - s->adobe_transform = -1; -- s->buf_size = buf_size; - - read_header: - /* reset on every SOI */ -diff --git a/libavcodec/mjpegdec.c b/libavcodec/mjpegdec.c -index 7135c95bda..afb117cfc6 100644 ---- a/libavcodec/mjpegdec.c -+++ b/libavcodec/mjpegdec.c -@@ -1082,10 +1082,6 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p - return AVERROR_INVALIDDATA; - if (s->v_max != 1 || s->h_max != 1 || !s->lossless) - return AVERROR_INVALIDDATA; -- if (s->bayer) { -- if (s->rct || s->pegasus_rct) -- return AVERROR_INVALIDDATA; +- /* Testing parameters here, we cannot test in LSE or SOF because +- * these interdepend and are allowed in either order +- */ +- if (state->maxval >= (1<bpp) || +- state->T1 > state->T2 || +- state->T2 > state->T3 || +- state->T3 > state->maxval || +- state->reset > FFMAX(255, state->maxval)) { +- ret = AVERROR_INVALIDDATA; +- goto end; - } +- + ff_jpegls_init_state(state); - - s->restart_count = s->restart_interval; -@@ -1202,8 +1198,6 @@ static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int p - ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1]; + if (s->bits <= 8) +diff --git a/libavcodec/lcldec.c b/libavcodec/lcldec.c +index 9317f8ef85..d281733fdd 100644 +--- a/libavcodec/lcldec.c ++++ b/libavcodec/lcldec.c +@@ -148,8 +148,6 @@ static int 
zlib_decomp(AVCodecContext *avctx, const uint8_t *src, int src_len, i + if (expected != (unsigned int)c->zstream.total_out) { + av_log(avctx, AV_LOG_ERROR, "Decoded size differs (%d != %lu)\n", + expected, c->zstream.total_out); +- if (expected > (unsigned int)c->zstream.total_out) +- return (unsigned int)c->zstream.total_out; + return AVERROR_UNKNOWN; + } + return c->zstream.total_out; +@@ -168,8 +166,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac + int row, col; + unsigned char *encoded = avpkt->data, *outptr; + uint8_t *y_out, *u_out, *v_out; +- int width = avctx->width; // Real image width +- int height = avctx->height; // Real image height ++ unsigned int width = avctx->width; // Real image width ++ unsigned int height = avctx->height; // Real image height + unsigned int mszh_dlen; + unsigned char yq, y1q, uq, vq; + int uqvq, ret; +@@ -226,19 +224,16 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac + break; + case COMP_MSZH_NOCOMP: { + int bppx2; +- int aligned_width = width; + switch (c->imgtype) { + case IMGTYPE_YUV111: + case IMGTYPE_RGB24: + bppx2 = 6; + break; + case IMGTYPE_YUV422: +- aligned_width &= ~3; + case IMGTYPE_YUV211: + bppx2 = 4; + break; + case IMGTYPE_YUV411: +- aligned_width &= ~3; + case IMGTYPE_YUV420: + bppx2 = 3; + break; +@@ -246,7 +241,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac + bppx2 = 0; // will error out below + break; } - } else if (s->bayer) { -- if (s->bits <= 8) -- return AVERROR_PATCHWELCOME; - if (nb_components == 1) { - /* Leave decoding to the TIFF/DNG decoder (see comment in ff_mjpeg_decode_sof) */ - for (mb_x = 0; mb_x < width; mb_x++) -@@ -1938,8 +1932,6 @@ static int mjpeg_decode_app(MJpegDecodeContext *s) +- if (len < ((aligned_width * height * bppx2) >> 1)) ++ if (len < ((width * height * bppx2) >> 1)) + return AVERROR_INVALIDDATA; + break; } +@@ -278,13 +273,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac + ret = zlib_decomp(avctx, buf + 8 + mthread_inlen, len - 8 - mthread_inlen, + mthread_outlen, mthread_outlen); + if (ret < 0) return ret; +- len = c->decomp_size; + } else { + int ret = zlib_decomp(avctx, buf, len, 0, c->decomp_size); + if (ret < 0) return ret; +- len = ret; + } + encoded = c->decomp_buf; ++ len = c->decomp_size; + break; + #endif + default: +@@ -312,8 +306,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac + } + break; + case IMGTYPE_YUV422: +- pixel_ptr = 0; + for (row = 0; row < height; row++) { ++ pixel_ptr = row * width * 2; + yq = uq = vq =0; + for (col = 0; col < width/4; col++) { + encoded[pixel_ptr] = yq -= encoded[pixel_ptr]; +@@ -329,8 +323,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac + } + break; + case IMGTYPE_YUV411: +- pixel_ptr = 0; + for (row = 0; row < height; row++) { ++ pixel_ptr = row * width / 2 * 3; + yq = uq = vq =0; + for (col = 0; col < width/4; col++) { + encoded[pixel_ptr] = yq -= encoded[pixel_ptr]; +@@ -406,11 +400,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac + v_out[ col >> 1 ] = *encoded++ + 128; + v_out[(col >> 1) + 1] = *encoded++ + 128; + } +- if (col && col < width) { +- u_out[ col >> 1 ] = u_out[(col>>1) - 1]; +- v_out[ col >> 1 ] = v_out[(col>>1) - 1]; +- } +- + y_out -= frame->linesize[0]; + u_out -= frame->linesize[1]; + v_out -= frame->linesize[2]; +@@ -432,10 +421,6 @@ static int decode_frame(AVCodecContext 
*avctx, void *data, int *got_frame, AVPac + u_out[col >> 2] = *encoded++ + 128; + v_out[col >> 2] = *encoded++ + 128; + } +- if (col && col < width) { +- u_out[col >> 2] = u_out[(col>>2) - 1]; +- v_out[col >> 2] = v_out[(col>>2) - 1]; +- } + y_out -= frame->linesize[0]; + u_out -= frame->linesize[1]; + v_out -= frame->linesize[2]; +@@ -493,7 +478,6 @@ static av_cold int decode_init(AVCodecContext *avctx) + FFALIGN(avctx->height, 4); + unsigned int max_decomp_size; + int subsample_h, subsample_v; +- int partial_h_supported = 0; - len -= 9; -- if (s->bayer) -- goto out; - if (s->got_picture) - if (rgb != s->rgb || pegasus_rct != s->pegasus_rct) { - av_log(s->avctx, AV_LOG_WARNING, "Mismatching LJIF tag\n"); -diff --git a/libavcodec/mjpegenc_common.c b/libavcodec/mjpegenc_common.c -index 0845814834..12dd7be2e8 100644 ---- a/libavcodec/mjpegenc_common.c -+++ b/libavcodec/mjpegenc_common.c -@@ -247,7 +247,7 @@ void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, - default: av_assert0(0); + if (avctx->extradata_size < 8) { + av_log(avctx, AV_LOG_ERROR, "Extradata size too small.\n"); +@@ -515,24 +499,26 @@ static av_cold int decode_init(AVCodecContext *avctx) + av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 1:1:1.\n"); + break; + case IMGTYPE_YUV422: +- c->decomp_size = (avctx->width & ~3) * avctx->height * 2; ++ c->decomp_size = basesize * 2; + max_decomp_size = max_basesize * 2; + avctx->pix_fmt = AV_PIX_FMT_YUV422P; + av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 4:2:2.\n"); +- partial_h_supported = 1; ++ if (avctx->width % 4) { ++ avpriv_request_sample(avctx, "Unsupported dimensions"); ++ return AVERROR_INVALIDDATA; ++ } + break; + case IMGTYPE_RGB24: +- c->decomp_size = FFALIGN(avctx->width*3, 4) * avctx->height; ++ c->decomp_size = basesize * 3; + max_decomp_size = max_basesize * 3; + avctx->pix_fmt = AV_PIX_FMT_BGR24; + av_log(avctx, AV_LOG_DEBUG, "Image type is RGB 24.\n"); + break; + case IMGTYPE_YUV411: +- c->decomp_size = (avctx->width & ~3) * avctx->height / 2 * 3; ++ c->decomp_size = basesize / 2 * 3; + max_decomp_size = max_basesize / 2 * 3; + avctx->pix_fmt = AV_PIX_FMT_YUV411P; + av_log(avctx, AV_LOG_DEBUG, "Image type is YUV 4:1:1.\n"); +- partial_h_supported = 1; + break; + case IMGTYPE_YUV211: + c->decomp_size = basesize * 2; +@@ -552,7 +538,7 @@ static av_cold int decode_init(AVCodecContext *avctx) } -- put_bits(pb, 16, 8 + 3 * components); -+ put_bits(pb, 16, 17); - if (lossless && ( avctx->pix_fmt == AV_PIX_FMT_BGR0 - || avctx->pix_fmt == AV_PIX_FMT_BGRA - || avctx->pix_fmt == AV_PIX_FMT_BGR24)) + av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &subsample_h, &subsample_v); +- if ((avctx->width % (1<height % (1<width % (1<height % (1<source_width * config->source_height * +- (config->encoder_bit_depth > 8 ? 2 : 1); ++ const int pack_mode_10bit = ++ (config->encoder_bit_depth > 8) && (config->compressed_ten_bit_format == 0) ? 1 : 0; ++ const size_t luma_size_8bit = ++ config->source_width * config->source_height * (1 << pack_mode_10bit); ++ const size_t luma_size_10bit = ++ (config->encoder_bit_depth > 8 && pack_mode_10bit == 0) ? 
luma_size_8bit : 0; + + EbSvtIOFormat *in_data; + +- svt_enc->raw_size = luma_size * 3 / 2; ++ svt_enc->raw_size = (luma_size_8bit + luma_size_10bit) * 3 / 2; + + // allocate buffer for in and out + svt_enc->in_buf = av_mallocz(sizeof(*svt_enc->in_buf)); +diff --git a/libavcodec/loco.c b/libavcodec/loco.c +index 92a5e8f2ce..99e3a1d021 100644 +--- a/libavcodec/loco.c ++++ b/libavcodec/loco.c +@@ -91,15 +91,10 @@ static inline int loco_get_rice(RICEContext *r) + if (get_bits_left(&r->gb) < 1) + return INT_MIN; + v = get_ur_golomb_jpegls(&r->gb, loco_get_rice_param(r), INT_MAX, 0); +- if (v == -1) +- return INT_MIN; + loco_update_rice_param(r, (v + 1) >> 1); + if (!v) { + if (r->save >= 0) { +- int run = get_ur_golomb_jpegls(&r->gb, 2, INT_MAX, 0); +- if (run == -1) +- return INT_MIN; +- r->run = run; ++ r->run = get_ur_golomb_jpegls(&r->gb, 2, INT_MAX, 0); + if (r->run > 1) + r->save += r->run + 1; + else +@@ -156,8 +151,6 @@ static int loco_decode_plane(LOCOContext *l, uint8_t *data, int width, int heigh + + /* restore top left pixel */ + val = loco_get_rice(&rc); +- if (val == INT_MIN) +- return AVERROR_INVALIDDATA; + data[0] = 128 + val; + /* restore top line */ + for (i = 1; i < width; i++) { +diff --git a/libavcodec/lpc.c b/libavcodec/lpc.c +index f9c4bcaf98..3ed61563ee 100644 +--- a/libavcodec/lpc.c ++++ b/libavcodec/lpc.c +@@ -244,10 +244,8 @@ int ff_lpc_calc_coefs(LPCContext *s, + double av_uninit(weight); + memset(var, 0, FFALIGN(MAX_LPC_ORDER+1,4)*sizeof(*var)); + +- /* Avoids initializing with an unused value when lpc_passes == 1 */ +- if (lpc_passes > 1) +- for(j=0; j #include "avcodec.h" -diff --git a/libavcodec/mobiclip.c b/libavcodec/mobiclip.c -index 4baf347446..bf47a5bc41 100644 ---- a/libavcodec/mobiclip.c -+++ b/libavcodec/mobiclip.c -@@ -329,7 +329,7 @@ static av_cold int mobiclip_init(AVCodecContext *avctx) - return 0; - } - --static int setup_qtables(AVCodecContext *avctx, int64_t quantizer) -+static int setup_qtables(AVCodecContext *avctx, int quantizer) - { - MobiClipContext *s = avctx->priv_data; - int qx, qy; -@@ -1256,7 +1256,7 @@ static int mobiclip_decode(AVCodecContext *avctx, void *data, - frame->key_frame = 0; - s->dct_tab_idx = 0; - -- ret = setup_qtables(avctx, s->quantizer + (int64_t)get_se_golomb(gb)); -+ ret = setup_qtables(avctx, s->quantizer + get_se_golomb(gb)); - if (ret < 0) - return ret; - diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c -index b79e22c422..5b0958733c 100644 +index 26da3e3a2f..b79e22c422 100644 --- a/libavcodec/motion_est.c +++ b/libavcodec/motion_est.c -@@ -1614,7 +1614,7 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type) - for(y=0; ymb_height; y++){ - int x; - int xy= y*s->mb_stride; -- for(x=0; xmb_width; x++, xy++){ -+ for(x=0; xmb_width; x++){ - if(s->mb_type[xy] & type){ - int mx= mv_table[xy][0]; - int my= mv_table[xy][1]; -@@ -1622,15 +1622,16 @@ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type) - fcode_tab[my + MAX_MV]); - int j; +@@ -1442,7 +1442,7 @@ static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y) + s->b_direct_mv_table[mot_xy][0]= 0; + s->b_direct_mv_table[mot_xy][1]= 0; -- if (mx >= range || mx < -range || -- my >= range || my < -range) -- continue; -+ if(mx >= range || mx < -range || -+ my >= range || my < -range) -+ continue; - - for(j=0; jpict_type==AV_PICTURE_TYPE_B || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy]) - score[j]-= 170; - } - } -+ xy++; - } - } +- return 256*256*256*64-1; ++ return 
256*256*256*64; + } + c->xmin= xmin; diff --git a/libavcodec/motionpixels.c b/libavcodec/motionpixels.c index 07febd3c22..b08a2f624b 100644 --- a/libavcodec/motionpixels.c @@ -21903,42 +22116,8 @@ index 07febd3c22..b08a2f624b 100644 } static void mp_set_rgb_from_yuv(MotionPixelsContext *mp, int x, int y, const YuvPixel *p) -diff --git a/libavcodec/movtextenc.c b/libavcodec/movtextenc.c -index b36354b14e..cf30adbd0a 100644 ---- a/libavcodec/movtextenc.c -+++ b/libavcodec/movtextenc.c -@@ -85,7 +85,7 @@ typedef struct { - uint8_t box_flags; - StyleBox d; - uint16_t text_pos; -- unsigned byte_count; -+ uint16_t byte_count; - char **fonts; - int font_count; - double font_scale_factor; -@@ -585,9 +585,9 @@ static void mov_text_cancel_overrides_cb(void *priv, const char *style_name) - mov_text_ass_style_set(s, style); - } - --static unsigned utf8_strlen(const char *text, int len) -+static uint16_t utf8_strlen(const char *text, int len) - { -- unsigned i = 0, ret = 0; -+ uint16_t i = 0, ret = 0; - while (i < len) { - char c = text[i]; - if ((c & 0x80) == 0) -@@ -607,7 +607,7 @@ static unsigned utf8_strlen(const char *text, int len) - - static void mov_text_text_cb(void *priv, const char *text, int len) - { -- unsigned utf8_len = utf8_strlen(text, len); -+ uint16_t utf8_len = utf8_strlen(text, len); - MovTextContext *s = priv; - av_bprint_append_data(&s->buffer, text, len); - // If it's not utf-8, just use the byte length diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c -index e8f99dc5cf..09bf01247d 100644 +index b6a2b44bc6..09bf01247d 100644 --- a/libavcodec/mpeg12dec.c +++ b/libavcodec/mpeg12dec.c @@ -2999,10 +2999,6 @@ static int ipu_decode_frame(AVCodecContext *avctx, void *data, @@ -21946,72 +22125,296 @@ index e8f99dc5cf..09bf01247d 100644 int ret; - // Check for minimal intra MB size (considering mb header, luma & chroma dc VLC, ac EOB VLC) -- if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2 + 3*4 + 2*2 + 2*6)) +- if (avpkt->size*8LL < (avctx->width+15)/16 * ((avctx->height+15)/16) * (2LL + 3*4 + 2*2 + 2*6)) - return AVERROR_INVALIDDATA; - ret = ff_get_buffer(avctx, frame, 0); if (ret < 0) return ret; -diff --git a/libavcodec/mpegaudiodec_template.c b/libavcodec/mpegaudiodec_template.c -index 642fa5ac79..4fd9e3a690 100644 ---- a/libavcodec/mpegaudiodec_template.c -+++ b/libavcodec/mpegaudiodec_template.c -@@ -372,7 +372,7 @@ static int handle_crc(MPADecodeContext *s, int sec_len) - crc_val = av_crc(crc_tab, crc_val, &buf[6], sec_byte_len); - - AV_WB32(tmp_buf, -- ((buf[6 + sec_byte_len] & (0xFF00U >> sec_rem_bits)) << 24) + -+ ((buf[6 + sec_byte_len] & (0xFF00 >> sec_rem_bits)) << 24) + - ((s->crc << 16) >> sec_rem_bits)); - - crc_val = av_crc(crc_tab, crc_val, tmp_buf, 3); -diff --git a/libavcodec/mss4.c b/libavcodec/mss4.c -index 4ad653c443..7f11f30dc8 100644 ---- a/libavcodec/mss4.c -+++ b/libavcodec/mss4.c -@@ -26,7 +26,6 @@ - */ - - #include "libavutil/thread.h" --#include "libavutil/imgutils.h" - - #include "avcodec.h" - #include "bytestream.h" -@@ -477,9 +476,6 @@ static int mss4_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, - width, height); - return AVERROR_INVALIDDATA; +diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c +index f7f08d5eca..2c440a5026 100644 +--- a/libavcodec/mpeg4videodec.c ++++ b/libavcodec/mpeg4videodec.c +@@ -349,8 +349,6 @@ static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *g + ctx->sprite_shift[0] = alpha + beta + rho - min_ab; + ctx->sprite_shift[1] = alpha + 
beta + rho - min_ab + 2; + break; +- default: +- av_assert0(0); } -- if (av_image_check_size2(width, height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx) < 0) -- return AVERROR_INVALIDDATA; + /* try to simplify the situation */ + if (sprite_delta[0][0] == a << ctx->sprite_shift[0] && +@@ -616,7 +614,7 @@ static inline int get_amv(Mpeg4DecContext *ctx, int n) + for (y = 0; y < 16; y++) { + int v; + +- v = mb_v + (unsigned)dy * y; ++ v = mb_v + dy * y; + // FIXME optimize + for (x = 0; x < 16; x++) { + sum += v >> shift; +@@ -1189,7 +1187,7 @@ static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block, + if (SHOW_UBITS(re, &s->gb, 1) == 0) { + av_log(s->avctx, AV_LOG_ERROR, + "1. marker bit missing in 3. esc\n"); +- if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR) || get_bits_left(&s->gb) <= 0) ++ if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR)) + return AVERROR_INVALIDDATA; + } + SKIP_CACHE(re, &s->gb, 1); +@@ -1200,7 +1198,7 @@ static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block, + if (SHOW_UBITS(re, &s->gb, 1) == 0) { + av_log(s->avctx, AV_LOG_ERROR, + "2. marker bit missing in 3. esc\n"); +- if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR) || get_bits_left(&s->gb) <= 0) ++ if (!(s->avctx->err_recognition & AV_EF_IGNORE_ERR)) + return AVERROR_INVALIDDATA; + } + +diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c +index 977a1c1f7a..cee3fdb36b 100644 +--- a/libavcodec/mpegvideo_enc.c ++++ b/libavcodec/mpegvideo_enc.c +@@ -1259,12 +1259,12 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg) + &v_chroma_shift); + + for (i = 0; i < 3; i++) { +- ptrdiff_t src_stride = pic_arg->linesize[i]; +- ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize; ++ int src_stride = pic_arg->linesize[i]; ++ int dst_stride = i ? s->uvlinesize : s->linesize; + int h_shift = i ? h_chroma_shift : 0; + int v_shift = i ? 
v_chroma_shift : 0; +- int w = AV_CEIL_RSHIFT(s->width , h_shift); +- int h = AV_CEIL_RSHIFT(s->height, v_shift); ++ int w = s->width >> h_shift; ++ int h = s->height >> v_shift; + uint8_t *src = pic_arg->data[i]; + uint8_t *dst = pic->f->data[i]; + int vpad = 16; +@@ -1278,7 +1278,7 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg) + dst += INPLACE_OFFSET; + + if (src_stride == dst_stride) +- memcpy(dst, src, src_stride * h - src_stride + w); ++ memcpy(dst, src, src_stride * h); + else { + int h2 = h; + uint8_t *dst2 = dst; +@@ -1321,8 +1321,6 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg) + /* shift buffer entries */ + for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++) + s->input_picture[i - flush_offset] = s->input_picture[i]; +- for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++) +- s->input_picture[i] = NULL; + + s->input_picture[encoding_delay] = (Picture*) pic; + +@@ -1506,7 +1504,7 @@ static int estimate_best_b_count(MpegEncContext *s) + goto fail; + } + +- rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3); ++ rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); + } + + /* get the delayed frames */ +@@ -1515,7 +1513,7 @@ static int estimate_best_b_count(MpegEncContext *s) + ret = out_size; + goto fail; + } +- rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3); ++ rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3); + + rd += c->error[0] + c->error[1] + c->error[2]; + +diff --git a/libavcodec/mscc.c b/libavcodec/mscc.c +index a2768dd7a1..fe02649623 100644 +--- a/libavcodec/mscc.c ++++ b/libavcodec/mscc.c +@@ -52,9 +52,6 @@ static int rle_uncompress(AVCodecContext *avctx, GetByteContext *gb, PutByteCont + unsigned run = bytestream2_get_byte(gb); + + if (run) { +- if (bytestream2_get_bytes_left_p(pb) < run * s->bpp) +- return AVERROR_INVALIDDATA; - - if (quality < 1 || quality > 100) { - av_log(avctx, AV_LOG_ERROR, "Invalid quality setting %d\n", quality); - return AVERROR_INVALIDDATA; + switch (avctx->bits_per_coded_sample) { + case 8: + fill = bytestream2_get_byte(gb); +@@ -103,9 +100,6 @@ static int rle_uncompress(AVCodecContext *avctx, GetByteContext *gb, PutByteCont + + bytestream2_seek_p(pb, y * avctx->width * s->bpp + x * s->bpp, SEEK_SET); + } else { +- if (bytestream2_get_bytes_left_p(pb) < copy * s->bpp) +- return AVERROR_INVALIDDATA; +- + for (j = 0; j < copy; j++) { + switch (avctx->bits_per_coded_sample) { + case 8: +diff --git a/libavcodec/mwsc.c b/libavcodec/mwsc.c +index 94554ebd4a..4db7642e85 100644 +--- a/libavcodec/mwsc.c ++++ b/libavcodec/mwsc.c +@@ -50,10 +50,6 @@ static int rle_uncompress(GetByteContext *gb, PutByteContext *pb, GetByteContext + + if (run == 0) { + run = bytestream2_get_le32(gb); +- +- if (bytestream2_tell_p(pb) + width - w < run) +- return AVERROR_INVALIDDATA; +- + for (int j = 0; j < run; j++, w++) { + if (w == width) { + w = 0; +@@ -65,10 +61,6 @@ static int rle_uncompress(GetByteContext *gb, PutByteContext *pb, GetByteContext + int pos = bytestream2_tell_p(pb); + + bytestream2_seek(gbp, pos, SEEK_SET); +- +- if (pos + width - w < fill) +- return AVERROR_INVALIDDATA; +- + for (int j = 0; j < fill; j++, w++) { + if (w == width) { + w = 0; +@@ -80,9 +72,6 @@ static int rle_uncompress(GetByteContext *gb, PutByteContext *pb, GetByteContext + + intra = 0; + } else { +- if (bytestream2_tell_p(pb) + width - w < run) +- return AVERROR_INVALIDDATA; +- + for (int j = 0; j < run; j++, w++) { + if (w == width) { + w = 0; +diff --git 
a/libavcodec/noise_bsf.c b/libavcodec/noise_bsf.c +index 2a60b29023..6ebd369633 100644 +--- a/libavcodec/noise_bsf.c ++++ b/libavcodec/noise_bsf.c +@@ -42,11 +42,6 @@ static int noise(AVBSFContext *ctx, AVPacket *pkt) + if (amount <= 0) + return AVERROR(EINVAL); + +- if (ctx->par_in->codec_id == AV_CODEC_ID_WRAPPED_AVFRAME) { +- av_log(ctx, AV_LOG_ERROR, "Wrapped AVFrame noising is unsupported\n"); +- return AVERROR_PATCHWELCOME; +- } +- + ret = ff_bsf_get_packet_ref(ctx, pkt); + if (ret < 0) + return ret; +diff --git a/libavcodec/notchlc.c b/libavcodec/notchlc.c +index 33a64a2d6d..9a53cad9cb 100644 +--- a/libavcodec/notchlc.c ++++ b/libavcodec/notchlc.c +@@ -242,9 +242,7 @@ static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame, + + bytestream2_seek(&dgb, s->y_data_offset + row_offset, SEEK_SET); + +- ret = init_get_bits8(&bit, dgb.buffer, bytestream2_get_bytes_left(&dgb)); +- if (ret < 0) +- return ret; ++ init_get_bits8(&bit, dgb.buffer, bytestream2_get_bytes_left(&dgb)); + for (int x = 0; x < avctx->width; x += 4) { + unsigned item = bytestream2_get_le32(gb); + unsigned y_min = item & 4095; +diff --git a/libavcodec/nvdec.c b/libavcodec/nvdec.c +index caf9715707..d6b6608866 100644 +--- a/libavcodec/nvdec.c ++++ b/libavcodec/nvdec.c +@@ -263,8 +263,8 @@ int ff_nvdec_decode_uninit(AVCodecContext *avctx) + { + NVDECContext *ctx = avctx->internal->hwaccel_priv_data; + ++ av_freep(&ctx->bitstream); + av_freep(&ctx->bitstream_internal); +- ctx->bitstream = NULL; + ctx->bitstream_len = 0; + ctx->bitstream_allocated = 0; + +@@ -667,8 +667,6 @@ int ff_nvdec_simple_end_frame(AVCodecContext *avctx) + NVDECContext *ctx = avctx->internal->hwaccel_priv_data; + int ret = ff_nvdec_end_frame(avctx); + ctx->bitstream = NULL; +- ctx->bitstream_len = 0; +- ctx->nb_slices = 0; + return ret; + } + +diff --git a/libavcodec/nvdec_h264.c b/libavcodec/nvdec_h264.c +index a9ccd6d53b..116bd4fb5d 100644 +--- a/libavcodec/nvdec_h264.c ++++ b/libavcodec/nvdec_h264.c +@@ -137,11 +137,11 @@ static int nvdec_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, + const H264SliceContext *sl = &h->slice_ctx[0]; + void *tmp; + +- tmp = av_fast_realloc(ctx->bitstream_internal, &ctx->bitstream_allocated, ++ tmp = av_fast_realloc(ctx->bitstream, &ctx->bitstream_allocated, + ctx->bitstream_len + size + 3); + if (!tmp) + return AVERROR(ENOMEM); +- ctx->bitstream = ctx->bitstream_internal = tmp; ++ ctx->bitstream = tmp; + + tmp = av_fast_realloc(ctx->slice_offsets, &ctx->slice_offsets_allocated, + (ctx->nb_slices + 1) * sizeof(*ctx->slice_offsets)); +diff --git a/libavcodec/nvdec_hevc.c b/libavcodec/nvdec_hevc.c +index 1f2b5ae9d0..590278ba04 100644 +--- a/libavcodec/nvdec_hevc.c ++++ b/libavcodec/nvdec_hevc.c +@@ -273,11 +273,11 @@ static int nvdec_hevc_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, + NVDECContext *ctx = avctx->internal->hwaccel_priv_data; + void *tmp; + +- tmp = av_fast_realloc(ctx->bitstream_internal, &ctx->bitstream_allocated, ++ tmp = av_fast_realloc(ctx->bitstream, &ctx->bitstream_allocated, + ctx->bitstream_len + size + 3); + if (!tmp) + return AVERROR(ENOMEM); +- ctx->bitstream = ctx->bitstream_internal = tmp; ++ ctx->bitstream = tmp; + + tmp = av_fast_realloc(ctx->slice_offsets, &ctx->slice_offsets_allocated, + (ctx->nb_slices + 1) * sizeof(*ctx->slice_offsets)); +diff --git a/libavcodec/nvdec_mpeg12.c b/libavcodec/nvdec_mpeg12.c +index 746d720888..9a9030d8d3 100644 +--- a/libavcodec/nvdec_mpeg12.c ++++ b/libavcodec/nvdec_mpeg12.c +@@ -80,9 +80,8 @@ static 
int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer + }; + + for (i = 0; i < 64; ++i) { +- int n = s->idsp.idct_permutation[i]; +- ppc->QuantMatrixIntra[i] = s->intra_matrix[n]; +- ppc->QuantMatrixInter[i] = s->inter_matrix[n]; ++ ppc->QuantMatrixIntra[i] = s->intra_matrix[i]; ++ ppc->QuantMatrixInter[i] = s->inter_matrix[i]; + } + + return 0; +diff --git a/libavcodec/nvdec_mpeg4.c b/libavcodec/nvdec_mpeg4.c +index 5404e4e330..739b049933 100644 +--- a/libavcodec/nvdec_mpeg4.c ++++ b/libavcodec/nvdec_mpeg4.c +@@ -86,9 +86,8 @@ static int nvdec_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer, + }; + + for (i = 0; i < 64; ++i) { +- int n = s->idsp.idct_permutation[i]; +- ppc->QuantMatrixIntra[i] = s->intra_matrix[n]; +- ppc->QuantMatrixInter[i] = s->inter_matrix[n]; ++ ppc->QuantMatrixIntra[i] = s->intra_matrix[i]; ++ ppc->QuantMatrixInter[i] = s->inter_matrix[i]; + } + + // We need to pass the full frame buffer and not just the slice diff --git a/libavcodec/nvenc.c b/libavcodec/nvenc.c -index c6498864c8..b09ddbe0fa 100644 +index c6498864c8..0ce61041ef 100644 --- a/libavcodec/nvenc.c +++ b/libavcodec/nvenc.c -@@ -1025,9 +1025,8 @@ static av_cold void nvenc_setup_rate_control(AVCodecContext *avctx) - - av_log(avctx, AV_LOG_VERBOSE, "CQ(%d) mode enabled.\n", tmp_quality); - -- // CQ mode shall discard avg bitrate/vbv buffer size and honor only max bitrate -+ //CQ mode shall discard avg bitrate & honor max bitrate; - ctx->encode_config.rcParams.averageBitRate = avctx->bit_rate = 0; -- ctx->encode_config.rcParams.vbvBufferSize = avctx->rc_buffer_size = 0; - ctx->encode_config.rcParams.maxBitRate = avctx->rc_max_rate; - } - } -@@ -1761,7 +1760,7 @@ static int nvenc_register_frame(AVCodecContext *avctx, const AVFrame *frame) - NV_ENCODE_API_FUNCTION_LIST *p_nvenc = &dl_fn->nvenc_funcs; - - AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frame->hw_frames_ctx->data; -- NV_ENC_REGISTER_RESOURCE reg = { 0 }; -+ NV_ENC_REGISTER_RESOURCE reg; - int i, idx, ret; - - for (i = 0; i < ctx->nb_registered_frames; i++) { -@@ -1926,7 +1925,7 @@ static int nvenc_set_timestamp(AVCodecContext *avctx, +@@ -1926,7 +1926,7 @@ static int nvenc_set_timestamp(AVCodecContext *avctx, pkt->pts = params->outputTimeStamp; pkt->dts = timestamp_queue_dequeue(ctx->timestamp_list); @@ -22020,20 +22423,6 @@ index c6498864c8..b09ddbe0fa 100644 return 0; } -diff --git a/libavcodec/opus_silk.c b/libavcodec/opus_silk.c -index 8523b55ada..913053c5e2 100644 ---- a/libavcodec/opus_silk.c -+++ b/libavcodec/opus_silk.c -@@ -198,8 +198,7 @@ static inline int silk_is_lpc_stable(const int16_t lpc[16], int order) - } - } - --static void silk_lsp2poly(const int32_t lsp[/* 2 * half_order - 1 */], -- int32_t pol[/* half_order + 1 */], int half_order) -+static void silk_lsp2poly(const int32_t lsp[16], int32_t pol[16], int half_order) - { - int i, j; - diff --git a/libavcodec/pictordec.c b/libavcodec/pictordec.c index 4d81c311c3..6340902526 100644 --- a/libavcodec/pictordec.c @@ -22048,29 +22437,22 @@ index 4d81c311c3..6340902526 100644 if (bits_per_plane == 8) { picmemset_8bpp(s, frame, val, run, &x, &y); diff --git a/libavcodec/pixlet.c b/libavcodec/pixlet.c -index febee5c31d..ad9d830af7 100644 +index 488672f921..febee5c31d 100644 --- a/libavcodec/pixlet.c +++ b/libavcodec/pixlet.c -@@ -405,7 +405,7 @@ static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale) - (int64_t) low [i - 1] * -INT64_C(325392907) + - (int64_t) high[i + 0] * INT64_C(1518500249) + - (int64_t) high[i - 1] * 
INT64_C(1518500249); -- dest[i * 2] = av_clip_int16(((value >> 32) * (uint64_t)scale) >> 32); -+ dest[i * 2] = av_clip_int16(((value >> 32) * scale) >> 32); - } - - for (i = 0; i < hsize; i++) { -@@ -416,7 +416,7 @@ static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale) - (int64_t) high[i + 1] * INT64_C(303700064) + - (int64_t) high[i + 0] * -INT64_C(3644400640) + - (int64_t) high[i - 1] * INT64_C(303700064); -- dest[i * 2 + 1] = av_clip_int16(((value >> 32) * (uint64_t)scale) >> 32); -+ dest[i * 2 + 1] = av_clip_int16(((value >> 32) * scale) >> 32); - } - } - +@@ -231,8 +231,8 @@ static int read_high_coeffs(AVCodecContext *avctx, uint8_t *src, int16_t *dst, + if (cnt1 >= length) { + cnt1 = get_bits(bc, nbits); + } else { +- pfx = FFMIN(value, 14); +- if (pfx < 1) ++ pfx = 14 + ((((uint64_t)(value - 14)) >> 32) & (value - 14)); ++ if (pfx < 1 || pfx > 25) + return AVERROR_INVALIDDATA; + cnt1 *= (1 << pfx) - 1; + shbits = show_bits(bc, pfx); diff --git a/libavcodec/pngdec.c b/libavcodec/pngdec.c -index 6301080832..6aa3c1b436 100644 +index 9e9f5c2219..6aa3c1b436 100644 --- a/libavcodec/pngdec.c +++ b/libavcodec/pngdec.c @@ -322,7 +322,7 @@ void ff_png_filter_row(PNGDSPContext *dsp, uint8_t *dst, int filter_type, @@ -22082,24 +22464,115 @@ index 6301080832..6aa3c1b436 100644 int g = dst [i + 1]; \ dst[i + 0] += g; \ dst[i + 2] += g; \ +@@ -643,8 +643,6 @@ static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s, + int ret; + size_t byte_depth = s->bit_depth > 8 ? 2 : 1; + +- if (!p) +- return AVERROR_INVALIDDATA; + if (!(s->hdr_state & PNG_IHDR)) { + av_log(avctx, AV_LOG_ERROR, "IDAT without IHDR\n"); + return AVERROR_INVALIDDATA; +@@ -861,7 +859,7 @@ static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s, + return 0; + } + +-static int decode_iccp_chunk(PNGDecContext *s, int length) ++static int decode_iccp_chunk(PNGDecContext *s, int length, AVFrame *f) + { + int ret, cnt = 0; + AVBPrint bp; +@@ -1024,7 +1022,7 @@ static int decode_fctl_chunk(AVCodecContext *avctx, PNGDecContext *s, + return AVERROR_INVALIDDATA; + } + +- if ((sequence_number == 0 || !s->last_picture.f) && ++ if ((sequence_number == 0 || !s->last_picture.f->data[0]) && + dispose_op == APNG_DISPOSE_OP_PREVIOUS) { + // No previous frame to revert to for the first frame + // Spec says to just treat it as a APNG_DISPOSE_OP_BACKGROUND +@@ -1324,7 +1322,7 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s, + break; + } + case MKTAG('i', 'C', 'C', 'P'): { +- if ((ret = decode_iccp_chunk(s, length)) < 0) ++ if ((ret = decode_iccp_chunk(s, length, p)) < 0) + goto fail; + break; + } +@@ -1377,9 +1375,6 @@ skip_tag: + } + exit_loop: + +- if (!p) +- return AVERROR_INVALIDDATA; +- + if (avctx->codec_id == AV_CODEC_ID_PNG && + avctx->skip_frame == AVDISCARD_ALL) { + return 0; +@@ -1619,7 +1614,7 @@ static int decode_frame_apng(AVCodecContext *avctx, + s->zstream.zfree = ff_png_zfree; + + bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size); +- if ((ret = decode_frame_common(avctx, s, NULL, avpkt)) < 0) ++ if ((ret = decode_frame_common(avctx, s, p, avpkt)) < 0) + goto end; + } + +diff --git a/libavcodec/pnmdec.c b/libavcodec/pnmdec.c +index 21e5247b05..4d5ce0bcb5 100644 +--- a/libavcodec/pnmdec.c ++++ b/libavcodec/pnmdec.c +@@ -256,7 +256,7 @@ static int pnm_decode_frame(AVCodecContext *avctx, void *data, + } + break; + case AV_PIX_FMT_GBRPF32: +- if (avctx->width * avctx->height * 12LL > s->bytestream_end - s->bytestream) ++ if (avctx->width * 
avctx->height * 12 > s->bytestream_end - s->bytestream) + return AVERROR_INVALIDDATA; + scale = 1.f / s->scale; + if (s->endian) { +diff --git a/libavcodec/proresdec2.c b/libavcodec/proresdec2.c +index 3c6cd85267..5a01c89c48 100644 +--- a/libavcodec/proresdec2.c ++++ b/libavcodec/proresdec2.c +@@ -490,7 +490,7 @@ static av_always_inline int decode_ac_coeffs(AVCodecContext *avctx, GetBitContex + + for (pos = block_mask;;) { + bits_left = gb->size_in_bits - re_index; +- if (bits_left <= 0 || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left))) ++ if (!bits_left || (bits_left < 32 && !SHOW_UBITS(re, gb, bits_left))) + break; + + DECODE_CODEWORD(run, run_to_cb[FFMIN(run, 15)], LAST_SKIP_BITS); +diff --git a/libavcodec/proresenc_kostya.c b/libavcodec/proresenc_kostya.c +index b00f5f5c5f..0e70163bcc 100644 +--- a/libavcodec/proresenc_kostya.c ++++ b/libavcodec/proresenc_kostya.c +@@ -3,6 +3,9 @@ + * + * Copyright (c) 2012 Konstantin Shishkov + * ++ * This encoder appears to be based on Anatoliy Wassermans considering ++ * similarities in the bugs. ++ * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or +@@ -340,7 +343,7 @@ static void get_slice_data(ProresContext *ctx, const uint16_t *src, + + static void get_alpha_data(ProresContext *ctx, const uint16_t *src, + ptrdiff_t linesize, int x, int y, int w, int h, +- uint16_t *blocks, int mbs_per_slice, int abits) ++ int16_t *blocks, int mbs_per_slice, int abits) + { + const int slice_width = 16 * mbs_per_slice; + int i, j, copy_w, copy_h; diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c -index 6f48d2c208..0b0ff03c18 100644 +index 6f48d2c208..c4707d23d9 100644 --- a/libavcodec/pthread_frame.c +++ b/libavcodec/pthread_frame.c -@@ -145,12 +145,6 @@ typedef struct FrameThreadContext { - * Set for the first N packets, where N is the number of threads. - * While it is set, ff_thread_en/decode_frame won't return any results. 
- */ -- -- /* hwaccel state is temporarily stored here in order to transfer its ownership -- * to the next decoding thread without the need for extra synchronization */ -- const AVHWAccel *stash_hwaccel; -- void *stash_hwaccel_context; -- void *stash_hwaccel_priv; - } FrameThreadContext; - - #if FF_API_THREAD_SAFE_CALLBACKS -@@ -215,7 +209,8 @@ FF_ENABLE_DEPRECATION_WARNINGS +@@ -215,7 +215,8 @@ FF_ENABLE_DEPRECATION_WARNINGS /* if the previous thread uses hwaccel then we take the lock to ensure * the threads don't run concurrently */ @@ -22109,53 +22582,47 @@ index 6f48d2c208..0b0ff03c18 100644 pthread_mutex_lock(&p->parent->hwaccel_mutex); p->hwaccel_serializing = 1; } -@@ -235,17 +230,9 @@ FF_ENABLE_DEPRECATION_WARNINGS - ff_thread_finish_setup(avctx); - - if (p->hwaccel_serializing) { -- /* wipe hwaccel state to avoid stale pointers lying around; -- * the state was transferred to FrameThreadContext in -- * ff_thread_finish_setup(), so nothing is leaked */ -- avctx->hwaccel = NULL; -- avctx->hwaccel_context = NULL; -- avctx->internal->hwaccel_priv_data = NULL; -- +@@ -245,7 +246,7 @@ FF_ENABLE_DEPRECATION_WARNINGS p->hwaccel_serializing = 0; pthread_mutex_unlock(&p->parent->hwaccel_mutex); } - av_assert0(!avctx->hwaccel); ++ av_assert0(!avctx->hwaccel || (avctx->hwaccel->caps_internal & HWACCEL_CAP_MT_SAFE)); if (p->async_serializing) { p->async_serializing = 0; -@@ -307,10 +294,14 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, - dst->color_range = src->color_range; - dst->chroma_sample_location = src->chroma_sample_location; - -+ dst->hwaccel = src->hwaccel; -+ dst->hwaccel_context = src->hwaccel_context; -+ - dst->channels = src->channels; - dst->sample_rate = src->sample_rate; - dst->sample_fmt = src->sample_fmt; - dst->channel_layout = src->channel_layout; -+ dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data; - - if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx || - (dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) { -@@ -456,12 +447,6 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx, +@@ -324,6 +325,12 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src, } + + dst->hwaccel_flags = src->hwaccel_flags; ++ if (src->hwaccel && ++ (src->hwaccel->caps_internal & HWACCEL_CAP_MT_SAFE)) { ++ dst->hwaccel = src->hwaccel; ++ dst->hwaccel_context = src->hwaccel_context; ++ dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data; ++ } + + err = av_buffer_replace(&dst->internal->pool, src->internal->pool); + if (err < 0) +@@ -457,10 +464,13 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx, } -- /* transfer the stashed hwaccel state, if any */ + /* transfer the stashed hwaccel state, if any */ - av_assert0(!p->avctx->hwaccel); - FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel); - FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context); - FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv); -- ++ av_assert0(!p->avctx->hwaccel || (p->avctx->hwaccel->caps_internal & HWACCEL_CAP_MT_SAFE)); ++ if (p->avctx->hwaccel && ++ !(p->avctx->hwaccel->caps_internal & HWACCEL_CAP_MT_SAFE)) { ++ FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel); ++ FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context); ++ FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv); ++ } + av_packet_unref(p->avpkt); ret = av_packet_ref(p->avpkt, 
avpkt); - if (ret < 0) { -@@ -652,7 +637,9 @@ void ff_thread_finish_setup(AVCodecContext *avctx) { +@@ -652,7 +662,9 @@ void ff_thread_finish_setup(AVCodecContext *avctx) { if (!(avctx->active_thread_type&FF_THREAD_FRAME)) return; @@ -22166,132 +22633,102 @@ index 6f48d2c208..0b0ff03c18 100644 pthread_mutex_lock(&p->parent->hwaccel_mutex); p->hwaccel_serializing = 1; } -@@ -665,14 +652,6 @@ void ff_thread_finish_setup(AVCodecContext *avctx) { - async_lock(p->parent); - } - -- /* save hwaccel state for passing to the next thread; -- * this is done here so that this worker thread can wipe its own hwaccel -- * state after decoding, without requiring synchronization */ -- av_assert0(!p->parent->stash_hwaccel); +@@ -669,9 +681,12 @@ void ff_thread_finish_setup(AVCodecContext *avctx) { + * this is done here so that this worker thread can wipe its own hwaccel + * state after decoding, without requiring synchronization */ + av_assert0(!p->parent->stash_hwaccel); - p->parent->stash_hwaccel = avctx->hwaccel; - p->parent->stash_hwaccel_context = avctx->hwaccel_context; - p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data; -- ++ if (avctx->hwaccel && ++ !(avctx->hwaccel->caps_internal & HWACCEL_CAP_MT_SAFE)) { ++ p->parent->stash_hwaccel = avctx->hwaccel; ++ p->parent->stash_hwaccel_context = avctx->hwaccel_context; ++ p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data; ++ } + pthread_mutex_lock(&p->progress_mutex); if(atomic_load(&p->state) == STATE_SETUP_FINISHED){ - av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n"); -@@ -767,6 +746,13 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count) +@@ -767,12 +782,23 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count) park_frame_worker_threads(fctx, thread_count); -+ if (fctx->prev_thread && avctx->internal->hwaccel_priv_data != -+ fctx->prev_thread->avctx->internal->hwaccel_priv_data) { -+ if (update_context_from_thread(avctx, fctx->prev_thread->avctx, 1) < 0) { -+ av_log(avctx, AV_LOG_ERROR, "Failed to update user thread.\n"); -+ } +- if (fctx->prev_thread && fctx->prev_thread != fctx->threads) +- if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) { +- av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n"); +- fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy; +- fctx->threads->avctx->internal->is_copy = 1; ++ if (avctx->hwaccel && (avctx->hwaccel->caps_internal & HWACCEL_CAP_MT_SAFE)) { ++ if (fctx->prev_thread && ++ avctx->internal->hwaccel_priv_data != ++ fctx->prev_thread->avctx->internal->hwaccel_priv_data) { ++ if (update_context_from_thread(avctx, fctx->prev_thread->avctx, 1) < 0) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to update user thread.\n"); ++ } + } ++ } ++ else { ++ if (fctx->prev_thread && fctx->prev_thread != fctx->threads) ++ if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) { ++ av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n"); ++ fctx->prev_thread->avctx->internal->is_copy = fctx->threads->avctx->internal->is_copy; ++ fctx->threads->avctx->internal->is_copy = 1; ++ } + } -+ - if (fctx->prev_thread && fctx->prev_thread != fctx->threads) - if (update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0) < 0) { - av_log(avctx, AV_LOG_ERROR, "Final thread update failed\n"); -@@ -820,13 +806,6 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count) - av_freep(&fctx->threads); - free_pthread(fctx, 
thread_ctx_offsets); -- /* if we have stashed hwaccel state, move it to the user-facing context, -- * so it will be freed in avcodec_close() */ + for (i = 0; i < thread_count; i++) { + PerThreadContext *p = &fctx->threads[i]; +@@ -822,10 +848,13 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count) + + /* if we have stashed hwaccel state, move it to the user-facing context, + * so it will be freed in avcodec_close() */ - av_assert0(!avctx->hwaccel); - FFSWAP(const AVHWAccel*, avctx->hwaccel, fctx->stash_hwaccel); - FFSWAP(void*, avctx->hwaccel_context, fctx->stash_hwaccel_context); - FFSWAP(void*, avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv); -- ++ av_assert0(!avctx->hwaccel || (avctx->hwaccel->caps_internal & HWACCEL_CAP_MT_SAFE)); ++ if (avctx->hwaccel && ++ !(avctx->hwaccel->caps_internal & HWACCEL_CAP_MT_SAFE)) { ++ FFSWAP(const AVHWAccel*, avctx->hwaccel, fctx->stash_hwaccel); ++ FFSWAP(void*, avctx->hwaccel_context, fctx->stash_hwaccel_context); ++ FFSWAP(void*, avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv); ++ } + av_freep(&avctx->internal->thread_ctx); - if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) -diff --git a/libavcodec/qdrw.c b/libavcodec/qdrw.c -index c04c756d71..65279c9805 100644 ---- a/libavcodec/qdrw.c -+++ b/libavcodec/qdrw.c -@@ -369,7 +369,7 @@ static int decode_frame(AVCodecContext *avctx, - bytestream2_skip(&gbc, 18); - colors = bytestream2_get_be16(&gbc); +diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c +index ccc3de1e10..5f2e641373 100644 +--- a/libavcodec/qsvdec.c ++++ b/libavcodec/qsvdec.c +@@ -290,12 +290,9 @@ static int qsv_decode_init_context(AVCodecContext *avctx, QSVContext *q, mfxVide -- if (colors < 0 || colors > 255) { -+ if (colors < 0 || colors > 256) { - av_log(avctx, AV_LOG_ERROR, - "Error color count - %i(0x%X)\n", colors, colors); - return AVERROR_INVALIDDATA; -diff --git a/libavcodec/qpeldsp.c b/libavcodec/qpeldsp.c -index d99b8fd0ba..6e52b33657 100644 ---- a/libavcodec/qpeldsp.c -+++ b/libavcodec/qpeldsp.c -@@ -198,7 +198,7 @@ static void OPNAME ## qpel8_mc01_c(uint8_t *dst, const uint8_t *src, \ - uint8_t full[16 * 9]; \ - uint8_t half[64]; \ - \ -- copy_block8(full, src, 16, stride, 9); \ -+ copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \ - OPNAME ## pixels8_l2_8(dst, full, half, stride, 16, 8, 8); \ - } \ -@@ -208,7 +208,7 @@ static void OPNAME ## qpel8_mc02_c(uint8_t *dst, const uint8_t *src, \ - { \ - uint8_t full[16 * 9]; \ - \ -- copy_block8(full, src, 16, stride, 9); \ -+ copy_block9(full, src, 16, stride, 9); \ - OPNAME ## mpeg4_qpel8_v_lowpass(dst, full, stride, 16); \ - } \ - \ -@@ -218,7 +218,7 @@ static void OPNAME ## qpel8_mc03_c(uint8_t *dst, const uint8_t *src, \ - uint8_t full[16 * 9]; \ - uint8_t half[64]; \ - \ -- copy_block8(full, src, 16, stride, 9); \ -+ copy_block9(full, src, 16, stride, 9); \ - put ## RND ## mpeg4_qpel8_v_lowpass(half, full, 8, 16); \ - OPNAME ## pixels8_l2_8(dst, full + 16, half, stride, 16, 8, 8); \ - } \ -@@ -458,7 +458,7 @@ static void OPNAME ## qpel16_mc01_c(uint8_t *dst, const uint8_t *src, \ - uint8_t full[24 * 17]; \ - uint8_t half[256]; \ - \ -- copy_block16(full, src, 24, stride, 17); \ -+ copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \ - OPNAME ## pixels16_l2_8(dst, full, half, stride, 24, 16, 16); \ - } \ -@@ -468,7 +468,7 @@ static void OPNAME ## qpel16_mc02_c(uint8_t *dst, const uint8_t *src, \ - { \ - uint8_t 
full[24 * 17]; \ - \ -- copy_block16(full, src, 24, stride, 17); \ -+ copy_block17(full, src, 24, stride, 17); \ - OPNAME ## mpeg4_qpel16_v_lowpass(dst, full, stride, 24); \ - } \ - \ -@@ -478,7 +478,7 @@ static void OPNAME ## qpel16_mc03_c(uint8_t *dst, const uint8_t *src, \ - uint8_t full[24 * 17]; \ - uint8_t half[256]; \ - \ -- copy_block16(full, src, 24, stride, 17); \ -+ copy_block17(full, src, 24, stride, 17); \ - put ## RND ## mpeg4_qpel16_v_lowpass(half, full, 16, 24); \ - OPNAME ## pixels16_l2_8(dst, full + 24, half, stride, 24, 16, 16); \ - } \ -diff --git a/libavcodec/rasc.c b/libavcodec/rasc.c -index 5ed1333886..207d50c452 100644 ---- a/libavcodec/rasc.c -+++ b/libavcodec/rasc.c -@@ -722,7 +722,6 @@ static int decode_frame(AVCodecContext *avctx, - break; - default: - bytestream2_skip(gb, size); -- ret = 0; - } + q->frame_info = param->mfx.FrameInfo; - if (ret < 0) +- if (!avctx->hw_frames_ctx) { +- ret = av_image_get_buffer_size(avctx->pix_fmt, FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1); +- if (ret < 0) +- return ret; +- q->pool = av_buffer_pool_init(ret, av_buffer_allocz); +- } ++ if (!avctx->hw_frames_ctx) ++ q->pool = av_buffer_pool_init(av_image_get_buffer_size(avctx->pix_fmt, ++ FFALIGN(avctx->width, 128), FFALIGN(avctx->height, 64), 1), av_buffer_allocz); + return 0; + } + +diff --git a/libavcodec/r210enc.c b/libavcodec/r210enc.c +index e5e0e775c5..be1943f5f9 100644 +--- a/libavcodec/r210enc.c ++++ b/libavcodec/r210enc.c +@@ -31,7 +31,7 @@ static av_cold int encode_init(AVCodecContext *avctx) + + avctx->bits_per_coded_sample = 32; + if (avctx->width > 0) +- avctx->bit_rate = av_rescale(ff_guess_coded_bitrate(avctx), aligned_width, avctx->width); ++ avctx->bit_rate = ff_guess_coded_bitrate(avctx) * aligned_width / avctx->width; + + return 0; + } diff --git a/libavcodec/raw.c b/libavcodec/raw.c index 079d5c5d10..0781f28615 100644 --- a/libavcodec/raw.c @@ -52634,20 +53071,18 @@ index b208753e2b..baf067c205 100644 block_counter++; } else { // FOUR COLOR BLOCK block_counter += encode_four_color_block(min_color, max_color, -diff --git a/libavcodec/sbrdsp_fixed.c b/libavcodec/sbrdsp_fixed.c -index 0d34a2a710..43fcc90ae5 100644 ---- a/libavcodec/sbrdsp_fixed.c -+++ b/libavcodec/sbrdsp_fixed.c -@@ -114,8 +114,8 @@ static void sbr_qmf_deint_neg_c(int *v, const int *src) - { - int i; - for (i = 0; i < 32; i++) { -- v[ i] = (int)(0x10U + src[63 - 2*i ]) >> 5; -- v[63 - i] = (int)(0x10U - src[63 - 2*i - 1]) >> 5; -+ v[ i] = ( src[63 - 2*i ] + 0x10) >> 5; -+ v[63 - i] = (-src[63 - 2*i - 1] + 0x10) >> 5; - } - } +diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c +index 6cd42e585b..99e580a09a 100644 +--- a/libavcodec/rv34.c ++++ b/libavcodec/rv34.c +@@ -96,8 +96,6 @@ static void rv34_gen_vlc(const uint8_t *bits, int size, VLC *vlc, const uint8_t + uint16_t cw[MAX_VLC_SIZE]; + int maxbits; + +- av_assert1(size > 0); +- + for (int i = 0; i < size; i++) + counts[bits[i]]++; diff --git a/libavcodec/scpr.c b/libavcodec/scpr.c index f78f43b5cd..2a0ebcecfc 100644 @@ -52664,10 +53099,35 @@ index f78f43b5cd..2a0ebcecfc 100644 ret = decode_value(s, s->op_model[ptype], 6, 1000, &ptype); if (ret < 0) diff --git a/libavcodec/scpr3.c b/libavcodec/scpr3.c -index 274f99ce71..78c58889cb 100644 +index 85524feafe..78c58889cb 100644 --- a/libavcodec/scpr3.c +++ b/libavcodec/scpr3.c -@@ -1168,9 +1168,6 @@ static int decompress_p3(AVCodecContext *avctx, +@@ -466,8 +466,6 @@ static int decode_adaptive6(PixelModel3 *m, uint32_t code, uint32_t *value, + return 0; + grow_dec(m); + c = 
add_dec(m, q, g, f); +- if (c < 0) +- return AVERROR_INVALIDDATA; + } + + incr_cntdec(m, c); +@@ -871,11 +869,11 @@ static int decode_unit3(SCPRContext *s, PixelModel3 *m, uint32_t code, uint32_t + sync_code3(gb, rc); + break; + case 6: +- ret = decode_adaptive6(m, code, value, &a, &b); +- if (!ret) ++ if (!decode_adaptive6(m, code, value, &a, &b)) { + ret = update_model6_to_7(m); +- if (ret < 0) +- return ret; ++ if (ret < 0) ++ return AVERROR_INVALIDDATA; ++ } + decode3(gb, rc, a, b); + sync_code3(gb, rc); + break; +@@ -1170,9 +1168,6 @@ static int decompress_p3(AVCodecContext *avctx, int run, bx = x * 16 + sx1, by = y * 16 + sy1; uint32_t clr, ptype = 0, r, g, b; @@ -52677,11 +53137,44 @@ index 274f99ce71..78c58889cb 100644 for (; by < y * 16 + sy2 && by < avctx->height;) { ret = decode_value3(s, 5, &s->op_model3[ptype].cntsum, s->op_model3[ptype].freqs[0], +diff --git a/libavcodec/sga.c b/libavcodec/sga.c +index 4ff700a0de..7e6bea530c 100644 +--- a/libavcodec/sga.c ++++ b/libavcodec/sga.c +@@ -72,7 +72,7 @@ static int decode_palette(GetByteContext *gb, uint32_t *pal) + return AVERROR_INVALIDDATA; + + memset(pal, 0, 16 * sizeof(*pal)); +- (void)init_get_bits8(&gbit, gb->buffer, 18); ++ init_get_bits8(&gbit, gb->buffer, 18); + + for (int RGBIndex = 0; RGBIndex < 3; RGBIndex++) { + for (int index = 0; index < 16; index++) { diff --git a/libavcodec/snowenc.c b/libavcodec/snowenc.c -index a0e8745ce3..16d2b7c302 100644 +index 2948c76cb0..16d2b7c302 100644 --- a/libavcodec/snowenc.c +++ b/libavcodec/snowenc.c -@@ -1544,10 +1544,10 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){ +@@ -269,7 +269,6 @@ static int encode_q_branch(SnowContext *s, int level, int x, int y){ + int my_context= av_log2(2*FFABS(left->my - top->my)); + int s_context= 2*left->level + 2*top->level + tl->level + tr->level; + int ref, best_ref, ref_score, ref_mx, ref_my; +- int range = MAX_MV >> (1 + qpel); + + av_assert0(sizeof(s->block_state) >= 256); + if(s->keyframe){ +@@ -311,11 +310,6 @@ static int encode_q_branch(SnowContext *s, int level, int x, int y){ + c->xmax = - (x+1)*block_w + (w<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3; + c->ymax = - (y+1)*block_w + (h<<(LOG2_MB_SIZE - s->block_max_depth)) + 16-3; + +- c->xmin = FFMAX(c->xmin,-range); +- c->xmax = FFMIN(c->xmax, range); +- c->ymin = FFMAX(c->ymin,-range); +- c->ymax = FFMIN(c->ymax, range); +- + if(P_LEFT[0] > (c->xmax<xmax< (c->ymax<ymax< (c->xmax<xmax<spatial_decomposition_count; level++){ @@ -52693,7 +53186,7 @@ index a0e8745ce3..16d2b7c302 100644 memset(s->spatial_idwt_buffer, 0, sizeof(*s->spatial_idwt_buffer)*width*height); ibuf[b->width/2 + b->height/2*b->stride]= 256*16; -@@ -1558,13 +1558,9 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){ +@@ -1564,13 +1558,9 @@ static void calculate_visual_weight(SnowContext *s, Plane *p){ error += d*d; } } @@ -52709,33 +53202,40 @@ index a0e8745ce3..16d2b7c302 100644 } diff --git a/libavcodec/sonic.c b/libavcodec/sonic.c -index 8662737837..c049f6aedc 100644 +index ea107d4ff6..8662737837 100644 --- a/libavcodec/sonic.c +++ b/libavcodec/sonic.c -@@ -1004,7 +1004,7 @@ static int sonic_decode_frame(AVCodecContext *avctx, +@@ -467,7 +467,7 @@ static void predictor_init_state(int *k, int *state, int order) - // dequantize - for (i = 0; i < s->num_taps; i++) -- s->predictor_k[i] *= (unsigned) s->tap_quant[i]; -+ s->predictor_k[i] *= s->tap_quant[i]; + static int predictor_calc_error(int *k, int *state, int order, int error) + { +- int i, x = error - (unsigned)shift_down(k[order-1] * 
(unsigned)state[order-1], LATTICE_SHIFT); ++ int i, x = error - shift_down(k[order-1] * (unsigned)state[order-1], LATTICE_SHIFT); + #if 1 + int *k_ptr = &(k[order-2]), +@@ -1009,7 +1009,7 @@ static int sonic_decode_frame(AVCodecContext *avctx, if (s->lossless) quant = 1; + else +- quant = get_symbol(&c, state, 0) * (unsigned)SAMPLE_FACTOR; ++ quant = get_symbol(&c, state, 0) * SAMPLE_FACTOR; + + // av_log(NULL, AV_LOG_INFO, "quant: %d\n", quant); + diff --git a/libavcodec/speedhq.c b/libavcodec/speedhq.c -index 5bf03a35e6..711bcd66d7 100644 +index 5bf03a35e6..5a201b3a6e 100644 --- a/libavcodec/speedhq.c +++ b/libavcodec/speedhq.c -@@ -498,9 +498,7 @@ static int speedhq_decode_frame(AVCodecContext *avctx, - uint32_t second_field_offset; - int ret; +@@ -500,8 +500,6 @@ static int speedhq_decode_frame(AVCodecContext *avctx, -- if (buf_size < 4 || avctx->width < 8 || avctx->width % 8 != 0) -- return AVERROR_INVALIDDATA; -- if (buf_size < avctx->width*avctx->height / 64 / 4) -+ if (buf_size < 4 || avctx->width < 8) + if (buf_size < 4 || avctx->width < 8 || avctx->width % 8 != 0) return AVERROR_INVALIDDATA; +- if (buf_size < avctx->width*avctx->height / 64 / 4) +- return AVERROR_INVALIDDATA; quality = buf[0]; + if (quality >= 100) { diff --git a/libavcodec/sunrast.c b/libavcodec/sunrast.c index 991915fa62..e1ec8a0832 100644 --- a/libavcodec/sunrast.c @@ -52795,19 +53295,131 @@ index 991915fa62..e1ec8a0832 100644 av_log(avctx, AV_LOG_WARNING, "invalid colormap length\n"); return AVERROR_INVALIDDATA; } +diff --git a/libavcodec/tak.c b/libavcodec/tak.c +index 7989afbd97..8aa956b661 100644 +--- a/libavcodec/tak.c ++++ b/libavcodec/tak.c +@@ -167,9 +167,6 @@ int ff_tak_decode_frame_header(AVCodecContext *avctx, GetBitContext *gb, + if (ti->flags & TAK_FRAME_FLAG_HAS_METADATA) + return AVERROR_INVALIDDATA; + +- if (get_bits_left(gb) < 24) +- return AVERROR_INVALIDDATA; +- + skip_bits(gb, 24); + + return 0; diff --git a/libavcodec/takdsp.c b/libavcodec/takdsp.c -index a8f9dba342..9cb8052596 100644 +index f5dc47988f..a8f9dba342 100644 --- a/libavcodec/takdsp.c +++ b/libavcodec/takdsp.c -@@ -65,7 +65,7 @@ static void decorrelate_sf(int32_t *p1, int32_t *p2, int length, int dshift, int +@@ -28,8 +28,8 @@ static void decorrelate_ls(int32_t *p1, int32_t *p2, int length) + int i; + for (i = 0; i < length; i++) { - int32_t a = p1[i]; - int32_t b = p2[i]; -- b = (unsigned)((int)(dfactor * (unsigned)(b >> dshift) + 128) >> 8) << dshift; -+ b = (unsigned)(dfactor * (b >> dshift) + 128 >> 8) << dshift; +- uint32_t a = p1[i]; +- uint32_t b = p2[i]; ++ int32_t a = p1[i]; ++ int32_t b = p2[i]; + p2[i] = a + b; + } + } +@@ -39,8 +39,8 @@ static void decorrelate_sr(int32_t *p1, int32_t *p2, int length) + int i; + + for (i = 0; i < length; i++) { +- uint32_t a = p1[i]; +- uint32_t b = p2[i]; ++ int32_t a = p1[i]; ++ int32_t b = p2[i]; p1[i] = b - a; } } +@@ -50,7 +50,7 @@ static void decorrelate_sm(int32_t *p1, int32_t *p2, int length) + int i; + + for (i = 0; i < length; i++) { +- uint32_t a = p1[i]; ++ int32_t a = p1[i]; + int32_t b = p2[i]; + a -= b >> 1; + p1[i] = a; +@@ -63,7 +63,7 @@ static void decorrelate_sf(int32_t *p1, int32_t *p2, int length, int dshift, int + int i; + + for (i = 0; i < length; i++) { +- uint32_t a = p1[i]; ++ int32_t a = p1[i]; + int32_t b = p2[i]; + b = (unsigned)((int)(dfactor * (unsigned)(b >> dshift) + 128) >> 8) << dshift; + p1[i] = b - a; +diff --git a/libavcodec/targaenc.c b/libavcodec/targaenc.c +index e939b90414..79030a012b 100644 +--- a/libavcodec/targaenc.c ++++ 
b/libavcodec/targaenc.c +@@ -21,7 +21,6 @@ + + #include + +-#include "libavutil/avassert.h" + #include "libavutil/imgutils.h" + #include "libavutil/internal.h" + #include "libavutil/intreadwrite.h" +@@ -89,11 +88,10 @@ static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + TargaContext *s = avctx->priv_data; + int bpp, picsize, datasize = -1, ret, i; + uint8_t *out; +- int maxpal = 32*32; + + picsize = av_image_get_buffer_size(avctx->pix_fmt, + avctx->width, avctx->height, 1); +- if ((ret = ff_alloc_packet2(avctx, pkt, picsize + 45 + maxpal, 0)) < 0) ++ if ((ret = ff_alloc_packet2(avctx, pkt, picsize + 45, 0)) < 0) + return ret; + + /* zero out the header and only set applicable fields */ +@@ -126,7 +124,6 @@ static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt, + AV_WL24(pkt->data + 18 + 3 * i, *(uint32_t *)(p->data[1] + i * 4)); + } + out += 32 * pal_bpp; /* skip past the palette we just output */ +- av_assert0(32 * pal_bpp <= maxpal); + break; + } + case AV_PIX_FMT_GRAY8: +diff --git a/libavcodec/tests/dct.c b/libavcodec/tests/dct.c +index 269b4993b3..eab39e9468 100644 +--- a/libavcodec/tests/dct.c ++++ b/libavcodec/tests/dct.c +@@ -223,8 +223,8 @@ static int dct_error(const struct algo *dct, int test, int is_idct, int speed, c + v = abs(err); + if (v > err_inf) + err_inf = v; +- err2_matrix[i] += v * (int64_t)v; +- err2 += v * (int64_t)v; ++ err2_matrix[i] += v * v; ++ err2 += v * v; + sysErr[i] += block[i] - block1[i]; + blockSumErr += v; + if (abs(block[i]) > maxout) +diff --git a/libavcodec/tests/jpeg2000dwt.c b/libavcodec/tests/jpeg2000dwt.c +index a6cf9f6824..80b33bee79 100644 +--- a/libavcodec/tests/jpeg2000dwt.c ++++ b/libavcodec/tests/jpeg2000dwt.c +@@ -47,12 +47,12 @@ static int test_dwt(int *array, int *ref, int border[2][2], int decomp_levels, i + return 1; + } + for (j = 0; j max_diff) { ++ if (FFABS(array[j] - ref[j]) > max_diff) { + fprintf(stderr, "missmatch at %d (%d != %d) decomp:%d border %d %d %d %d\n", + j, array[j], ref[j],decomp_levels, border[0][0], border[0][1], border[1][0], border[1][1]); + return 2; + } +- err2 += (array[j] - ref[j]) * (int64_t)(array[j] - ref[j]); ++ err2 += (array[j] - ref[j]) * (array[j] - ref[j]); + array[j] = ref[j]; + } + ff_dwt_destroy(s); diff --git a/libavcodec/tests/snowenc.c b/libavcodec/tests/snowenc.c index 65699158ca..d5f94e8a61 100644 --- a/libavcodec/tests/snowenc.c @@ -52898,27 +53510,8 @@ index 65699158ca..d5f94e8a61 100644 - return ret; + return 0; } -diff --git a/libavcodec/texturedspenc.c b/libavcodec/texturedspenc.c -index 5ce72cbd1e..3d68e0cf39 100644 ---- a/libavcodec/texturedspenc.c -+++ b/libavcodec/texturedspenc.c -@@ -255,11 +255,11 @@ static void optimize_colors(const uint8_t *block, ptrdiff_t stride, - - muv = minv = maxv = bp[0]; - for (y = 0; y < 4; y++) { -- for (x = 0; x < 4; x++) { -+ for (x = 4; x < 4; x += 4) { - muv += bp[x * 4 + y * stride]; -- if (bp[x * 4 + y * stride] < minv) -+ if (bp[x] < minv) - minv = bp[x * 4 + y * stride]; -- else if (bp[x * 4 + y * stride] > maxv) -+ else if (bp[x] > maxv) - maxv = bp[x * 4 + y * stride]; - } - } diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c -index 05187dce51..f8c68f1e7d 100644 +index 9819f9924e..bef0c59d9d 100644 --- a/libavcodec/tiff.c +++ b/libavcodec/tiff.c @@ -100,6 +100,7 @@ typedef struct TiffContext { @@ -52929,60 +53522,27 @@ index 05187dce51..f8c68f1e7d 100644 int is_jpeg; -@@ -734,6 +735,19 @@ static int dng_decode_jpeg(AVCodecContext *avctx, AVFrame *frame, - return 0; - } +@@ -410,8 +411,7 @@ static void 
av_always_inline horizontal_fill(TiffContext *s, + uint8_t shift = is_dng ? 0 : 16 - bpp; + GetBitContext gb; -+static int dng_decode_strip(AVCodecContext *avctx, AVFrame *frame) -+{ -+ TiffContext *s = avctx->priv_data; -+ -+ s->jpgframe->width = s->width; -+ s->jpgframe->height = s->height; -+ -+ s->avctx_mjpeg->width = s->width; -+ s->avctx_mjpeg->height = s->height; -+ -+ return dng_decode_jpeg(avctx, frame, s->stripsize, 0, 0, s->width, s->height); -+} -+ - static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int stride, - const uint8_t *src, int size, int strip_start, int lines) - { -@@ -772,7 +786,6 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid - if (s->is_bayer) { - av_assert0(width == (s->bpp * s->width + 7) >> 3); - } -- av_assert0(!(s->is_bayer && is_yuv)); - if (p->format == AV_PIX_FMT_GRAY12) { - av_fast_padded_malloc(&s->yuv_line, &s->yuv_line_size, width); - if (s->yuv_line == NULL) { -@@ -856,9 +869,7 @@ static int tiff_unpack_strip(TiffContext *s, AVFrame *p, uint8_t *dst, int strid - av_log(s->avctx, AV_LOG_ERROR, "More than one DNG JPEG strips unsupported\n"); - return AVERROR_PATCHWELCOME; - } -- if (!s->is_bayer) -- return AVERROR_PATCHWELCOME; -- if ((ret = dng_decode_jpeg(s->avctx, p, s->stripsize, 0, 0, s->width, s->height)) < 0) -+ if ((ret = dng_decode_strip(s->avctx, p)) < 0) - return ret; - return 0; - } -@@ -976,8 +987,11 @@ static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, - int pos_x = 0, pos_y = 0; - int ret; +- int ret = init_get_bits8(&gb, src, width); +- av_assert1(ret >= 0); ++ init_get_bits8(&gb, src, width); + for (int i = 0; i < s->width; i++) { + dst16[i] = get_bits(&gb, bpp) << shift; + } +@@ -445,8 +445,7 @@ static void unpack_gray(TiffContext *s, AVFrame *p, + GetBitContext gb; + uint16_t *dst = (uint16_t *)(p->data[0] + lnum * p->linesize[0]); -- if (s->tile_width <= 0 || s->tile_length <= 0) -- return AVERROR_INVALIDDATA; -+ s->jpgframe->width = s->tile_width; -+ s->jpgframe->height = s->tile_length; -+ -+ s->avctx_mjpeg->width = s->tile_width; -+ s->avctx_mjpeg->height = s->tile_length; +- int ret = init_get_bits8(&gb, src, width); +- av_assert1(ret >= 0); ++ init_get_bits8(&gb, src, width); - has_width_leftover = (s->width % s->tile_width != 0); - has_height_leftover = (s->height % s->tile_length != 0); -@@ -987,7 +1001,7 @@ static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, + for (int i = 0; i < s->width; i++) { + dst[i] = get_bits(&gb, bpp); +@@ -989,7 +988,7 @@ static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame, tile_count_y = (s->height + s->tile_length - 1) / s->tile_length; /* Iterate over the number of tiles */ @@ -52991,7 +53551,21 @@ index 05187dce51..f8c68f1e7d 100644 tile_x = tile_idx % tile_count_x; tile_y = tile_idx / tile_count_x; -@@ -1407,6 +1421,7 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame) +@@ -1281,13 +1280,9 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame) + s->is_thumbnail = (value != 0); + break; + case TIFF_WIDTH: +- if (value > INT_MAX) +- return AVERROR_INVALIDDATA; + s->width = value; + break; + case TIFF_HEIGHT: +- if (value > INT_MAX) +- return AVERROR_INVALIDDATA; + s->height = value; + break; + case TIFF_BPP: +@@ -1413,24 +1408,19 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame) break; case TIFF_TILE_OFFSETS: s->tile_offsets_offset = off; @@ -52999,32 +53573,55 @@ index 05187dce51..f8c68f1e7d 100644 s->is_tiled = 1; break; case TIFF_TILE_BYTE_COUNTS: -@@ -1762,7 
+1777,7 @@ static int decode_frame(AVCodecContext *avctx, - TiffContext *const s = avctx->priv_data; - AVFrame *const p = data; - ThreadFrame frame = { .f = data }; -- unsigned off, last_off = 0; -+ unsigned off, last_off; - int le, ret, plane, planes; - int i, j, entries, stride; - unsigned soff, ssize; -@@ -1827,6 +1842,7 @@ again: - /** whether we should process this multi-page IFD's next page */ - retry_for_page = s->get_page && s->cur_page + 1 < s->get_page; // get_page is 1-indexed - -+ last_off = off; - if (retry_for_page) { - // set offset to the next IFD - off = ff_tget_long(&s->gb, le); -@@ -1844,7 +1860,6 @@ again: - avpriv_request_sample(s->avctx, "non increasing IFD offset"); + s->tile_byte_counts_offset = off; + break; + case TIFF_TILE_LENGTH: +- if (value > INT_MAX) +- return AVERROR_INVALIDDATA; + s->tile_length = value; + break; + case TIFF_TILE_WIDTH: +- if (value > INT_MAX) +- return AVERROR_INVALIDDATA; + s->tile_width = value; + break; + case TIFF_PREDICTOR: +- if (value > INT_MAX) +- return AVERROR_INVALIDDATA; + s->predictor = value; + break; + case TIFF_SUB_IFDS: +@@ -1440,7 +1430,7 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame) + s->sub_ifd = ff_tget(&s->gb, TIFF_LONG, s->le); /** Only get the first SubIFD */ + break; + case DNG_LINEARIZATION_TABLE: +- if (count < 1 || count > FF_ARRAY_ELEMS(s->dng_lut)) ++ if (count > FF_ARRAY_ELEMS(s->dng_lut)) return AVERROR_INVALIDDATA; + for (int i = 0; i < count; i++) + s->dng_lut[i] = ff_tget(&s->gb, type, s->le); +@@ -1561,18 +1551,12 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame) } -- last_off = off; - if (off >= UINT_MAX - 14 || avpkt->size < off + 14) { - av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n"); - return AVERROR_INVALIDDATA; -@@ -1901,7 +1916,7 @@ again: + break; + case TIFF_T4OPTIONS: +- if (s->compr == TIFF_G3) { +- if (value > INT_MAX) +- return AVERROR_INVALIDDATA; ++ if (s->compr == TIFF_G3) + s->fax_opts = value; +- } + break; + case TIFF_T6OPTIONS: +- if (s->compr == TIFF_G4) { +- if (value > INT_MAX) +- return AVERROR_INVALIDDATA; ++ if (s->compr == TIFF_G4) + s->fax_opts = value; +- } + break; + #define ADD_METADATA(count, name, sep)\ + if ((ret = add_metadata(count, type, name, sep, s, frame)) < 0) {\ +@@ -1919,7 +1903,7 @@ again: return AVERROR_INVALIDDATA; } @@ -53033,38 +53630,37 @@ index 05187dce51..f8c68f1e7d 100644 has_strip_bits = s->strippos || s->strips || s->stripoff || s->rps || s->sot || s->sstype || s->stripsize || s->stripsizesoff; if (has_tile_bits && has_strip_bits) { -@@ -2154,7 +2169,6 @@ static av_cold int tiff_init(AVCodecContext *avctx) - s->avctx_mjpeg->flags2 = avctx->flags2; - s->avctx_mjpeg->dct_algo = avctx->dct_algo; - s->avctx_mjpeg->idct_algo = avctx->idct_algo; -- s->avctx_mjpeg->max_pixels = avctx->max_pixels; - ret = avcodec_open2(s->avctx_mjpeg, codec, NULL); - if (ret < 0) { - return ret; +diff --git a/libavcodec/truemotion1.c b/libavcodec/truemotion1.c +index 930b43b6ef..94782fef4b 100644 +--- a/libavcodec/truemotion1.c ++++ b/libavcodec/truemotion1.c +@@ -407,11 +407,6 @@ static int truemotion1_decode_header(TrueMotion1Context *s) + return AVERROR_PATCHWELCOME; + } + +- if (s->h & 3) { +- avpriv_request_sample(s->avctx, "Frame with height not being a multiple of 4"); +- return AVERROR_PATCHWELCOME; +- } +- + if (s->w != s->avctx->width || s->h != s->avctx->height || + new_pix_fmt != s->avctx->pix_fmt) { + av_frame_unref(s->frame); diff --git a/libavcodec/tta.c b/libavcodec/tta.c -index 3630afcfae..f1e159b03d 100644 
+index f92be311c3..3630afcfae 100644 --- a/libavcodec/tta.c +++ b/libavcodec/tta.c -@@ -371,15 +371,8 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data, - case 3: { - // shift samples for 24-bit sample format - int32_t *samples = (int32_t *)frame->data[0]; -- int overflow = 0; -- -- for (i = 0; i < framelen * s->channels; i++) { -- int scaled = *samples * 256U; -- overflow += (scaled >> 8 != *samples); -- *samples++ = scaled; -- } -- if (overflow) -- av_log(avctx, AV_LOG_WARNING, "%d overflows occurred on 24bit upscale\n", overflow); -+ for (i = 0; i < framelen * s->channels; i++) -+ *samples++ *= 256; - // reset decode buffer - s->decode_buffer = NULL; - break; +@@ -335,7 +335,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data, + if (s->channels > 1) { + int32_t *r = p - 1; + for (*p += *r / 2; r > (int32_t*)p - s->channels; r--) +- *r = *(r + 1) - (unsigned)*r; ++ *r = *(r + 1) - *r; + } + cur_chan = 0; + i++; diff --git a/libavcodec/utils.c b/libavcodec/utils.c -index fdc3de1b1d..825094d2f3 100644 +index 2859493696..825094d2f3 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -236,8 +236,6 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, @@ -53076,7 +53672,14 @@ index fdc3de1b1d..825094d2f3 100644 break; case AV_PIX_FMT_YUV411P: case AV_PIX_FMT_YUVJ411P: -@@ -316,7 +314,6 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, +@@ -310,13 +308,12 @@ void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + } + + if (s->codec_id == AV_CODEC_ID_IFF_ILBM) { +- w_align = FFMAX(w_align, 16); ++ w_align = FFMAX(w_align, 8); + } + *width = FFALIGN(*width, w_align); *height = FFALIGN(*height, h_align); if (s->codec_id == AV_CODEC_ID_H264 || s->lowres || @@ -53094,6 +53697,31 @@ index fdc3de1b1d..825094d2f3 100644 for (i = 0; i < 4; i++) linesize_align[i] = STRIDE_ALIGN; +@@ -691,9 +685,9 @@ static int get_audio_frame_duration(enum AVCodecID id, int sr, int ch, int ba, + if (sr > 0) { + /* calc from sample rate */ + if (id == AV_CODEC_ID_TTA) +- return 256ll * sr / 245; ++ return 256 * sr / 245; + else if (id == AV_CODEC_ID_DST) +- return 588ll * sr / 44100; ++ return 588 * sr / 44100; + else if (id == AV_CODEC_ID_BINKAUDIO_DCT) { + if (sr / 22050 > 22) + return 0; +diff --git a/libavcodec/utvideoenc.c b/libavcodec/utvideoenc.c +index 75983f8ad0..5c87eb50ac 100644 +--- a/libavcodec/utvideoenc.c ++++ b/libavcodec/utvideoenc.c +@@ -252,7 +252,7 @@ FF_ENABLE_DEPRECATION_WARNINGS + * - Compression mode (none/huff) + * And write the flags. 
+ */ +- c->flags = (c->slices - 1U) << 24; ++ c->flags = (c->slices - 1) << 24; + c->flags |= 0 << 11; // bit field to signal interlaced encoding mode + c->flags |= c->compression; + diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c index 4b2679eb38..8d80d19788 100644 --- a/libavcodec/v4l2_buffers.c @@ -56072,15 +56700,11 @@ index b67b216331..ded1478a49 100644 + #endif /* AVCODEC_V4L2_M2M_H */ diff --git a/libavcodec/v4l2_m2m_dec.c b/libavcodec/v4l2_m2m_dec.c -index ab07c0a24a..b25779fd3e 100644 +index ab07c0a24a..2bd113facb 100644 --- a/libavcodec/v4l2_m2m_dec.c +++ b/libavcodec/v4l2_m2m_dec.c -@@ -21,8 +21,14 @@ - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ +@@ -23,6 +23,10 @@ -+#include "config.h" -+ #include #include + @@ -56090,7 +56714,7 @@ index ab07c0a24a..b25779fd3e 100644 #include "libavutil/pixfmt.h" #include "libavutil/pixdesc.h" #include "libavutil/opt.h" -@@ -30,75 +36,274 @@ +@@ -30,75 +34,267 @@ #include "libavcodec/decode.h" #include "libavcodec/internal.h" @@ -56104,13 +56728,6 @@ index ab07c0a24a..b25779fd3e 100644 +#include "v4l2_req_dmabufs.h" -static int v4l2_try_start(AVCodecContext *avctx) -+#if CONFIG_H264_DECODER -+#include "h264_parse.h" -+#endif -+#if CONFIG_HEVC_DECODER -+#include "hevc_parse.h" -+#endif -+ +// Pick 64 for max last count - that is >1sec at 60fps +#define STATS_LAST_COUNT_MAX 64 +#define STATS_INTERVAL_MAX (1 << 30) @@ -56170,13 +56787,13 @@ index ab07c0a24a..b25779fd3e 100644 + for (i = 0; i != 8; ++i) { + *s++ = ' '; + s = len > i + offset ? hex2(s, *m++) : dash2(s); -+ } + } + *s++ = ' '; + *s++ = ':'; + for (; i != 16; ++i) { + *s++ = ' '; + s = len > i + offset ? hex2(s, *m++) : dash2(s); - } ++ } + *s++ = 0; +} @@ -56413,7 +57030,7 @@ index ab07c0a24a..b25779fd3e 100644 return 0; } -@@ -133,58 +338,742 @@ static int v4l2_prepare_decoder(V4L2m2mContext *s) +@@ -133,58 +329,548 @@ static int v4l2_prepare_decoder(V4L2m2mContext *s) return 0; } @@ -56621,23 +57238,23 @@ index ab07c0a24a..b25779fd3e 100644 + } + return NQ_DRAINING; + } -+ -+ if (!s->buf_pkt.size) -+ return NQ_NONE; -+ -+ if ((ret = check_output_streamon(avctx, s)) != 0) -+ return ret; - ret = ff_v4l2_context_enqueue_packet(output, &s->buf_pkt); - if (ret < 0 && ret != AVERROR(EAGAIN)) - goto fail; ++ if (!s->buf_pkt.size) ++ return NQ_NONE; + +- /* if EAGAIN don't unref packet and try to enqueue in the next iteration */ +- if (ret != AVERROR(EAGAIN)) ++ if ((ret = check_output_streamon(avctx, s)) != 0) ++ return ret; ++ + if (s->extdata_sent) + ret = ff_v4l2_context_enqueue_packet(&s->output, &s->buf_pkt, NULL, 0); + else + ret = ff_v4l2_context_enqueue_packet(&s->output, &s->buf_pkt, s->extdata_data, s->extdata_size); - -- /* if EAGAIN don't unref packet and try to enqueue in the next iteration */ -- if (ret != AVERROR(EAGAIN)) ++ + if (ret == AVERROR(EAGAIN)) { + // Out of input buffers - keep packet + ret = NQ_Q_FULL; @@ -56656,6 +57273,15 @@ index ab07c0a24a..b25779fd3e 100644 - goto fail; + av_log(avctx, AV_LOG_ERROR, "Packet enqueue failure: err=%d\n", ret); + return ret; ++ } ++ } ++ ++ // Start if we haven't ++ { ++ const int ret2 = v4l2_try_start(avctx); ++ if (ret2) { ++ av_log(avctx, AV_LOG_DEBUG, "Start failure: err=%d\n", ret2); ++ ret = (ret2 == AVERROR(ENOMEM)) ? 
ret2 : NQ_DEAD; } } @@ -56663,18 +57289,9 @@ index ab07c0a24a..b25779fd3e 100644 - return ff_v4l2_context_dequeue_frame(capture, frame, -1); -fail: - av_packet_unref(&s->buf_pkt); -+ // Start if we haven't -+ { -+ const int ret2 = v4l2_try_start(avctx); -+ if (ret2) { -+ av_log(avctx, AV_LOG_DEBUG, "Start failure: err=%d\n", ret2); -+ ret = (ret2 == AVERROR(ENOMEM)) ? ret2 : NQ_DEAD; -+ } -+ } -+ -+ return ret; -+} -+ + return ret; + } + +static int qbuf_wait(AVCodecContext * const avctx, V4L2Context * const ctx) +{ + int rv = 0; @@ -56831,130 +57448,9 @@ index ab07c0a24a..b25779fd3e 100644 + ret = v4l2_receive_frame2(avctx, frame); + done = us_time(); + av_log(avctx, AV_LOG_TRACE, ">>> %s: rx time=%" PRId64 ", rv=%d\n", __func__, done - now, ret); - return ret; - } -+#endif -+ -+static uint32_t -+avprofile_to_v4l2(const enum AVCodecID codec_id, const int avprofile) -+{ -+ switch (codec_id) { -+ case AV_CODEC_ID_H264: -+ switch (avprofile) { -+ case FF_PROFILE_H264_BASELINE: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE; -+ case FF_PROFILE_H264_CONSTRAINED_BASELINE: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE; -+ case FF_PROFILE_H264_MAIN: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_MAIN; -+ case FF_PROFILE_H264_EXTENDED: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED; -+ case FF_PROFILE_H264_HIGH: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH; -+ case FF_PROFILE_H264_HIGH_10: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10; -+ case FF_PROFILE_H264_HIGH_10_INTRA: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10_INTRA; -+ case FF_PROFILE_H264_MULTIVIEW_HIGH: -+ case FF_PROFILE_H264_HIGH_422: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422; -+ case FF_PROFILE_H264_HIGH_422_INTRA: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422_INTRA; -+ case FF_PROFILE_H264_STEREO_HIGH: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_STEREO_HIGH; -+ case FF_PROFILE_H264_HIGH_444_PREDICTIVE: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE; -+ case FF_PROFILE_H264_HIGH_444_INTRA: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_INTRA; -+ case FF_PROFILE_H264_CAVLC_444: -+ return V4L2_MPEG_VIDEO_H264_PROFILE_CAVLC_444_INTRA; -+ case FF_PROFILE_H264_HIGH_444: -+ default: -+ break; -+// V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_BASELINE = 12, -+// V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH = 13, -+// V4L2_MPEG_VIDEO_H264_PROFILE_SCALABLE_HIGH_INTRA = 14, -+// V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH = 16, -+// V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_HIGH = 17, -+ } -+ break; -+ case AV_CODEC_ID_MPEG2VIDEO: -+ case AV_CODEC_ID_MPEG4: -+ case AV_CODEC_ID_VC1: -+ case AV_CODEC_ID_VP8: -+ case AV_CODEC_ID_VP9: -+ case AV_CODEC_ID_AV1: -+ // Most profiles are a simple number that matches the V4L2 enum -+ return avprofile; -+ default: -+ break; -+ } -+ return ~(uint32_t)0; ++ return ret; +} -+ -+// This check mirrors Chrome's profile check by testing to see if the profile -+// exists as a possible value for the V4L2 profile control -+static int -+check_profile(AVCodecContext *const avctx, V4L2m2mContext *const s) -+{ -+ struct v4l2_queryctrl query_ctrl; -+ struct v4l2_querymenu query_menu; -+ uint32_t profile_id; -+ -+ // An unset profile is almost certainly zero or -99 - do not reject -+ if (avctx->profile <= 0) { -+ av_log(avctx, AV_LOG_VERBOSE, "Profile %d <= 0 - check skipped\n", avctx->profile); -+ return 0; -+ } -+ -+ memset(&query_ctrl, 0, sizeof(query_ctrl)); -+ switch (avctx->codec_id) { -+ case AV_CODEC_ID_MPEG2VIDEO: -+ profile_id = V4L2_CID_MPEG_VIDEO_MPEG2_PROFILE; -+ break; -+ case 
AV_CODEC_ID_MPEG4: -+ profile_id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE; -+ break; -+ case AV_CODEC_ID_H264: -+ profile_id = V4L2_CID_MPEG_VIDEO_H264_PROFILE; -+ break; -+ case AV_CODEC_ID_VP8: -+ profile_id = V4L2_CID_MPEG_VIDEO_VP8_PROFILE; -+ break; -+ case AV_CODEC_ID_VP9: -+ profile_id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE; -+ break; -+#ifdef V4L2_CID_MPEG_VIDEO_AV1_PROFILE -+ case AV_CODEC_ID_AV1: -+ profile_id = V4L2_CID_MPEG_VIDEO_AV1_PROFILE; -+ break; +#endif -+ default: -+ av_log(avctx, AV_LOG_VERBOSE, "Can't map profile for codec id %d; profile check skipped\n", avctx->codec_id); -+ return 0; -+ } -+ -+ query_ctrl = (struct v4l2_queryctrl){.id = profile_id}; -+ if (ioctl(s->fd, VIDIOC_QUERYCTRL, &query_ctrl) != 0) { -+ av_log(avctx, AV_LOG_VERBOSE, "Query profile ctrl (%#x) not supported: assume OK\n", query_ctrl.id); -+ } -+ else { -+ av_log(avctx, AV_LOG_DEBUG, "%s: Control supported: %#x\n", __func__, query_ctrl.id); -+ -+ query_menu = (struct v4l2_querymenu){ -+ .id = query_ctrl.id, -+ .index = avprofile_to_v4l2(avctx->codec_id, avctx->profile), -+ }; -+ -+ if (query_menu.index > query_ctrl.maximum || -+ query_menu.index < query_ctrl.minimum || -+ ioctl(s->fd, VIDIOC_QUERYMENU, &query_menu) != 0) { -+ return AVERROR(ENOENT); -+ } -+ } -+ -+ return 0; -+}; + +static int +check_size(AVCodecContext * const avctx, V4L2m2mContext * const s) @@ -57076,78 +57572,6 @@ index ab07c0a24a..b25779fd3e 100644 + return size + (1 << 16); +} + -+static void -+parse_extradata(AVCodecContext *avctx) -+{ -+ if (!avctx->extradata || !avctx->extradata_size) -+ return; -+ -+ switch (avctx->codec_id) { -+#if CONFIG_H264_DECODER -+ case AV_CODEC_ID_H264: -+ { -+ H264ParamSets ps = {{NULL}}; -+ int is_avc = 0; -+ int nal_length_size = 0; -+ int ret; -+ -+ ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size, -+ &ps, &is_avc, &nal_length_size, -+ avctx->err_recognition, avctx); -+ if (ret > 0) { -+ const SPS * sps = NULL; -+ unsigned int i; -+ for (i = 0; i != MAX_SPS_COUNT; ++i) { -+ if (ps.sps_list[i]) { -+ sps = (const SPS *)ps.sps_list[i]->data; -+ break; -+ } -+ } -+ if (sps) { -+ avctx->profile = ff_h264_get_profile(sps); -+ avctx->level = sps->level_idc; -+ } -+ } -+ ff_h264_ps_uninit(&ps); -+ break; -+ } -+#endif -+#if CONFIG_HEVC_DECODER -+ case AV_CODEC_ID_HEVC: -+ { -+ HEVCParamSets ps = {{NULL}}; -+ HEVCSEI sei = {{{{0}}}}; -+ int is_nalff = 0; -+ int nal_length_size = 0; -+ int ret; -+ -+ ret = ff_hevc_decode_extradata(avctx->extradata, avctx->extradata_size, -+ &ps, &sei, &is_nalff, &nal_length_size, -+ avctx->err_recognition, 0, avctx); -+ if (ret > 0) { -+ const HEVCSPS * sps = NULL; -+ unsigned int i; -+ for (i = 0; i != HEVC_MAX_SPS_COUNT; ++i) { -+ if (ps.sps_list[i]) { -+ sps = (const HEVCSPS *)ps.sps_list[i]->data; -+ break; -+ } -+ } -+ if (sps) { -+ avctx->profile = sps->ptl.general_ptl.profile_idc; -+ avctx->level = sps->ptl.general_ptl.level_idc; -+ } -+ } -+ ff_hevc_ps_uninit(&ps); -+ ff_hevc_reset_sei(&sei); -+ break; -+ } -+#endif -+ default: -+ break; -+ } -+} - static av_cold int v4l2_decode_init(AVCodecContext *avctx) { V4L2Context *capture, *output; @@ -57168,8 +57592,7 @@ index ab07c0a24a..b25779fd3e 100644 + avctx->ticks_per_frame = 2; + } + -+ parse_extradata(avctx); -+ ++ av_log(avctx, AV_LOG_INFO, "level=%d\n", avctx->level); ret = ff_v4l2_m2m_create_context(priv, &s); if (ret < 0) return ret; @@ -57180,7 +57603,7 @@ index ab07c0a24a..b25779fd3e 100644 capture = &s->capture; output = &s->output; -@@ -192,14 +1081,65 @@ static av_cold int 
v4l2_decode_init(AVCodecContext *avctx) +@@ -192,14 +878,65 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) * by the v4l2 driver; this event will trigger a full pipeline reconfig and * the proper values will be retrieved from the kernel driver. */ @@ -57248,7 +57671,7 @@ index ab07c0a24a..b25779fd3e 100644 s->avctx = avctx; ret = ff_v4l2_m2m_codec_init(priv); -@@ -208,12 +1148,88 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) +@@ -208,12 +945,84 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) return ret; } @@ -57271,10 +57694,6 @@ index ab07c0a24a..b25779fd3e 100644 + if ((ret = check_size(avctx, s)) != 0) + return ret; + -+ if ((ret = check_profile(avctx, s)) != 0) { -+ av_log(avctx, AV_LOG_WARNING, "Profile %d not supported by decode\n", avctx->profile); -+ return ret; -+ } + return 0; } @@ -57339,7 +57758,7 @@ index ab07c0a24a..b25779fd3e 100644 } #define OFFSET(x) offsetof(V4L2m2mPriv, x) -@@ -222,10 +1238,17 @@ static av_cold int v4l2_decode_close(AVCodecContext *avctx) +@@ -222,10 +1031,17 @@ static av_cold int v4l2_decode_close(AVCodecContext *avctx) static const AVOption options[] = { V4L_M2M_DEFAULT_OPTS, { "num_capture_buffers", "Number of buffers in the capture context", @@ -57358,7 +57777,7 @@ index ab07c0a24a..b25779fd3e 100644 #define M2MDEC_CLASS(NAME) \ static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \ .class_name = #NAME "_v4l2m2m_decoder", \ -@@ -246,9 +1269,15 @@ static const AVOption options[] = { +@@ -246,9 +1062,15 @@ static const AVOption options[] = { .init = v4l2_decode_init, \ .receive_frame = v4l2_receive_frame, \ .close = v4l2_decode_close, \ @@ -63132,343 +63551,57 @@ index 0000000000..99c90064ea +extern const v4l2_req_decode_fns V2(ff_v4l2_req_hevc, 4); + +#endif -diff --git a/libavcodec/vaapi_av1.c b/libavcodec/vaapi_av1.c -index 5985493b8d..16b7e35747 100644 ---- a/libavcodec/vaapi_av1.c -+++ b/libavcodec/vaapi_av1.c -@@ -21,28 +21,8 @@ - #include "libavutil/pixdesc.h" - #include "hwconfig.h" - #include "vaapi_decode.h" --#include "internal.h" - #include "av1dec.h" - --typedef struct VAAPIAV1FrameRef { -- ThreadFrame frame; -- int valid; --} VAAPIAV1FrameRef; -- --typedef struct VAAPIAV1DecContext { -- VAAPIDecodeContext base; -- -- /** -- * For film grain case, VAAPI generate 2 output for each frame, -- * current_frame will not apply film grain, and will be used for -- * references for next frames. Maintain the reference list without -- * applying film grain here. And current_display_picture will be -- * used to apply film grain and push to downstream. -- */ -- VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES]; -- ThreadFrame tmp_frame; --} VAAPIAV1DecContext; -- - static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf) - { - if (vf) -@@ -69,48 +49,6 @@ static int8_t vaapi_av1_get_bit_depth_idx(AVCodecContext *avctx) - return bit_depth == 8 ? 0 : bit_depth == 10 ? 
1 : 2; - } - --static int vaapi_av1_decode_init(AVCodecContext *avctx) --{ -- VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data; -- -- ctx->tmp_frame.f = av_frame_alloc(); -- if (!ctx->tmp_frame.f) { -- av_log(avctx, AV_LOG_ERROR, -- "Failed to allocate frame.\n"); -- return AVERROR(ENOMEM); -- } -- -- for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) { -- ctx->ref_tab[i].frame.f = av_frame_alloc(); -- if (!ctx->ref_tab[i].frame.f) { -- av_log(avctx, AV_LOG_ERROR, -- "Failed to allocate reference table frame %d.\n", i); -- return AVERROR(ENOMEM); -- } -- ctx->ref_tab[i].valid = 0; -- } -- -- return ff_vaapi_decode_init(avctx); --} -- --static int vaapi_av1_decode_uninit(AVCodecContext *avctx) --{ -- VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data; -- -- if (ctx->tmp_frame.f->buf[0]) -- ff_thread_release_buffer(avctx, &ctx->tmp_frame); -- av_frame_free(&ctx->tmp_frame.f); -- -- for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) { -- if (ctx->ref_tab[i].frame.f->buf[0]) -- ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame); -- av_frame_free(&ctx->ref_tab[i].frame.f); -- } -- -- return ff_vaapi_decode_uninit(avctx); --} -- -- - static int vaapi_av1_start_frame(AVCodecContext *avctx, - av_unused const uint8_t *buffer, - av_unused uint32_t size) -@@ -120,62 +58,40 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx, - const AV1RawFrameHeader *frame_header = s->raw_frame_header; - const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain; - VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private; -- VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data; - VADecPictureParameterBufferAV1 pic_param; - int8_t bit_depth_idx; - int err = 0; - int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain; - uint8_t remap_lr_type[4] = {AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ}; -- uint8_t segmentation_feature_signed[AV1_SEG_LVL_MAX] = {1, 1, 1, 1, 1, 0, 0, 0}; -- uint8_t segmentation_feature_max[AV1_SEG_LVL_MAX] = {255, AV1_MAX_LOOP_FILTER, -- AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, AV1_MAX_LOOP_FILTER, 7 , 0 , 0 }; -+ -+ pic->output_surface = vaapi_av1_surface_id(&s->cur_frame); - - bit_depth_idx = vaapi_av1_get_bit_depth_idx(avctx); - if (bit_depth_idx < 0) - goto fail; - -- if (apply_grain) { -- if (ctx->tmp_frame.f->buf[0]) -- ff_thread_release_buffer(avctx, &ctx->tmp_frame); -- err = ff_thread_get_buffer(avctx, &ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF); -- if (err < 0) -- goto fail; -- pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame.f); -- } else { -- pic->output_surface = vaapi_av1_surface_id(&s->cur_frame); -- } -- - memset(&pic_param, 0, sizeof(VADecPictureParameterBufferAV1)); - pic_param = (VADecPictureParameterBufferAV1) { -- .profile = seq->seq_profile, -- .order_hint_bits_minus_1 = seq->order_hint_bits_minus_1, -- .bit_depth_idx = bit_depth_idx, -- .matrix_coefficients = seq->color_config.matrix_coefficients, -- .current_frame = pic->output_surface, -- .current_display_picture = vaapi_av1_surface_id(&s->cur_frame), -- .frame_width_minus1 = frame_header->frame_width_minus_1, -- .frame_height_minus1 = frame_header->frame_height_minus_1, -- .primary_ref_frame = frame_header->primary_ref_frame, -- .order_hint = frame_header->order_hint, -- .tile_cols = frame_header->tile_cols, -- .tile_rows = frame_header->tile_rows, -- .context_update_tile_id = frame_header->context_update_tile_id, -- .superres_scale_denominator = 
frame_header->use_superres ? -- frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN : -- AV1_SUPERRES_NUM, -- .interp_filter = frame_header->interpolation_filter, -- .filter_level[0] = frame_header->loop_filter_level[0], -- .filter_level[1] = frame_header->loop_filter_level[1], -- .filter_level_u = frame_header->loop_filter_level[2], -- .filter_level_v = frame_header->loop_filter_level[3], -- .base_qindex = frame_header->base_q_idx, -- .y_dc_delta_q = frame_header->delta_q_y_dc, -- .u_dc_delta_q = frame_header->delta_q_u_dc, -- .u_ac_delta_q = frame_header->delta_q_u_ac, -- .v_dc_delta_q = frame_header->delta_q_v_dc, -- .v_ac_delta_q = frame_header->delta_q_v_ac, -- .cdef_damping_minus_3 = frame_header->cdef_damping_minus_3, -- .cdef_bits = frame_header->cdef_bits, -+ .profile = seq->seq_profile, -+ .order_hint_bits_minus_1 = seq->order_hint_bits_minus_1, -+ .bit_depth_idx = bit_depth_idx, -+ .current_frame = pic->output_surface, -+ .current_display_picture = pic->output_surface, -+ .frame_width_minus1 = frame_header->frame_width_minus_1, -+ .frame_height_minus1 = frame_header->frame_height_minus_1, -+ .primary_ref_frame = frame_header->primary_ref_frame, -+ .order_hint = frame_header->order_hint, -+ .tile_cols = frame_header->tile_cols, -+ .tile_rows = frame_header->tile_rows, -+ .context_update_tile_id = frame_header->context_update_tile_id, -+ .interp_filter = frame_header->interpolation_filter, -+ .filter_level[0] = frame_header->loop_filter_level[0], -+ .filter_level[1] = frame_header->loop_filter_level[1], -+ .filter_level_u = frame_header->loop_filter_level[2], -+ .filter_level_v = frame_header->loop_filter_level[3], -+ .base_qindex = frame_header->base_q_idx, -+ .cdef_damping_minus_3 = frame_header->cdef_damping_minus_3, -+ .cdef_bits = frame_header->cdef_bits, - .seq_info_fields.fields = { - .still_picture = seq->still_picture, - .use_128x128_superblock = seq->use_128x128_superblock, -@@ -246,15 +162,12 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx, - .mode_ref_delta_update = frame_header->loop_filter_delta_update, - }, - .mode_control_fields.bits = { -- .delta_q_present_flag = frame_header->delta_q_present, -- .log2_delta_q_res = frame_header->delta_q_res, -- .delta_lf_present_flag = frame_header->delta_lf_present, -- .log2_delta_lf_res = frame_header->delta_lf_res, -- .delta_lf_multi = frame_header->delta_lf_multi, -- .tx_mode = frame_header->tx_mode, -- .reference_select = frame_header->reference_select, -- .reduced_tx_set_used = frame_header->reduced_tx_set, -- .skip_mode_present = frame_header->skip_mode_present, -+ .delta_q_present_flag = frame_header->delta_q_present, -+ .log2_delta_q_res = frame_header->delta_q_res, -+ .tx_mode = frame_header->tx_mode, -+ .reference_select = frame_header->reference_select, -+ .reduced_tx_set_used = frame_header->reduced_tx_set, -+ .skip_mode_present = frame_header->skip_mode_present, - }, - .loop_restoration_fields.bits = { - .yframe_restoration_type = remap_lr_type[frame_header->lr_type[0]], -@@ -265,9 +178,6 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx, - }, - .qmatrix_fields.bits = { - .using_qmatrix = frame_header->using_qmatrix, -- .qm_y = frame_header->qm_y, -- .qm_u = frame_header->qm_u, -- .qm_v = frame_header->qm_v, - } - }; - -@@ -275,9 +185,7 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx, - if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY) - pic_param.ref_frame_map[i] = VA_INVALID_ID; - else -- pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ? 
-- ff_vaapi_get_surface_id(ctx->ref_tab[i].frame.f) : -- vaapi_av1_surface_id(&s->ref[i]); -+ pic_param.ref_frame_map[i] = vaapi_av1_surface_id(&s->ref[i]); - } - for (int i = 0; i < AV1_REFS_PER_FRAME; i++) { - pic_param.ref_frame_idx[i] = frame_header->ref_frame_idx[i]; -@@ -305,22 +213,10 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx, - frame_header->height_in_sbs_minus_1[i]; - } - for (int i = AV1_REF_FRAME_LAST; i <= AV1_REF_FRAME_ALTREF; i++) { -- pic_param.wm[i - 1].invalid = s->cur_frame.gm_invalid[i]; -- pic_param.wm[i - 1].wmtype = s->cur_frame.gm_type[i]; -+ pic_param.wm[i - 1].wmtype = s->cur_frame.gm_type[i]; - for (int j = 0; j < 6; j++) - pic_param.wm[i - 1].wmmat[j] = s->cur_frame.gm_params[i][j]; - } -- for (int i = 0; i < AV1_MAX_SEGMENTS; i++) { -- for (int j = 0; j < AV1_SEG_LVL_MAX; j++) { -- pic_param.seg_info.feature_mask[i] |= (frame_header->feature_enabled[i][j] << j); -- if (segmentation_feature_signed[j]) -- pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j], -- -segmentation_feature_max[j], segmentation_feature_max[j]); -- else -- pic_param.seg_info.feature_data[i][j] = av_clip(frame_header->feature_value[i][j], -- 0, segmentation_feature_max[j]); -- } -- } - if (apply_grain) { - for (int i = 0; i < film_grain->num_y_points; i++) { - pic_param.film_grain_info.point_y_value[i] = -@@ -367,34 +263,8 @@ fail: - static int vaapi_av1_end_frame(AVCodecContext *avctx) - { - const AV1DecContext *s = avctx->priv_data; -- const AV1RawFrameHeader *header = s->raw_frame_header; -- const AV1RawFilmGrainParams *film_grain = &s->cur_frame.film_grain; - VAAPIDecodePicture *pic = s->cur_frame.hwaccel_picture_private; -- VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data; -- -- int apply_grain = !(avctx->export_side_data & AV_CODEC_EXPORT_DATA_FILM_GRAIN) && film_grain->apply_grain; -- int ret; -- ret = ff_vaapi_decode_issue(avctx, pic); -- if (ret < 0) -- return ret; -- -- for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) { -- if (header->refresh_frame_flags & (1 << i)) { -- if (ctx->ref_tab[i].frame.f->buf[0]) -- ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame); -- -- if (apply_grain) { -- ret = ff_thread_ref_frame(&ctx->ref_tab[i].frame, &ctx->tmp_frame); -- if (ret < 0) -- return ret; -- ctx->ref_tab[i].valid = 1; -- } else { -- ctx->ref_tab[i].valid = 0; -- } -- } -- } -- -- return 0; -+ return ff_vaapi_decode_issue(avctx, pic); - } - - static int vaapi_av1_decode_slice(AVCodecContext *avctx, -@@ -441,9 +311,9 @@ const AVHWAccel ff_av1_vaapi_hwaccel = { - .end_frame = vaapi_av1_end_frame, - .decode_slice = vaapi_av1_decode_slice, - .frame_priv_data_size = sizeof(VAAPIDecodePicture), -- .init = vaapi_av1_decode_init, -- .uninit = vaapi_av1_decode_uninit, -+ .init = ff_vaapi_decode_init, -+ .uninit = ff_vaapi_decode_uninit, - .frame_params = ff_vaapi_common_frame_params, -- .priv_data_size = sizeof(VAAPIAV1DecContext), -+ .priv_data_size = sizeof(VAAPIDecodeContext), - .caps_internal = HWACCEL_CAP_ASYNC_SAFE, - }; -diff --git a/libavcodec/vaapi_decode.c b/libavcodec/vaapi_decode.c -index 032e8531f2..57a0eb4e6e 100644 ---- a/libavcodec/vaapi_decode.c -+++ b/libavcodec/vaapi_decode.c -@@ -577,10 +577,10 @@ static int vaapi_decode_make_config(AVCodecContext *avctx, - switch (avctx->codec_id) { - case AV_CODEC_ID_H264: - case AV_CODEC_ID_HEVC: -- case AV_CODEC_ID_AV1: - frames->initial_pool_size += 16; - break; - case AV_CODEC_ID_VP9: -+ case AV_CODEC_ID_AV1: - frames->initial_pool_size += 8; - break; - case AV_CODEC_ID_VP8: 
diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c -index b1fa3307cc..607858435f 100644 +index 7e93afcb12..b1fa3307cc 100644 --- a/libavcodec/vaapi_encode.c +++ b/libavcodec/vaapi_encode.c -@@ -2366,11 +2366,6 @@ av_cold int ff_vaapi_encode_init(AVCodecContext *avctx) - VAStatus vas; - int err; +@@ -2546,14 +2546,12 @@ av_cold int ff_vaapi_encode_close(AVCodecContext *avctx) + av_buffer_pool_uninit(&ctx->output_buffer_pool); -- ctx->va_config = VA_INVALID_ID; -- ctx->va_context = VA_INVALID_ID; -- -- /* If you add something that can fail above this av_frame_alloc(), -- * modify ff_vaapi_encode_close() accordingly. */ - ctx->frame = av_frame_alloc(); - if (!ctx->frame) { - return AVERROR(ENOMEM); -@@ -2382,6 +2377,9 @@ av_cold int ff_vaapi_encode_init(AVCodecContext *avctx) - return AVERROR(EINVAL); + if (ctx->va_context != VA_INVALID_ID) { +- if (ctx->hwctx) +- vaDestroyContext(ctx->hwctx->display, ctx->va_context); ++ vaDestroyContext(ctx->hwctx->display, ctx->va_context); + ctx->va_context = VA_INVALID_ID; } -+ ctx->va_config = VA_INVALID_ID; -+ ctx->va_context = VA_INVALID_ID; -+ - ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx); - if (!ctx->input_frames_ref) { - err = AVERROR(ENOMEM); -@@ -2533,11 +2531,6 @@ av_cold int ff_vaapi_encode_close(AVCodecContext *avctx) - VAAPIEncodeContext *ctx = avctx->priv_data; - VAAPIEncodePicture *pic, *next; + if (ctx->va_config != VA_INVALID_ID) { +- if (ctx->hwctx) +- vaDestroyConfig(ctx->hwctx->display, ctx->va_config); ++ vaDestroyConfig(ctx->hwctx->display, ctx->va_config); + ctx->va_config = VA_INVALID_ID; + } -- /* We check ctx->frame to know whether ff_vaapi_encode_init() -- * has been called and va_config/va_context initialized. */ -- if (!ctx->frame) -- return 0; +diff --git a/libavcodec/vble.c b/libavcodec/vble.c +index f2ad2f8366..2cddd550b1 100644 +--- a/libavcodec/vble.c ++++ b/libavcodec/vble.c +@@ -193,9 +193,6 @@ static av_cold int vble_decode_init(AVCodecContext *avctx) + ctx->size = av_image_get_buffer_size(avctx->pix_fmt, + avctx->width, avctx->height, 1); + +- if (ctx->size < 0) +- return ctx->size; - - for (pic = ctx->pic_start; pic; pic = next) { - next = pic->next; - vaapi_encode_free(avctx, pic); + ctx->val = av_malloc_array(ctx->size, sizeof(*ctx->val)); + + if (!ctx->val) { +diff --git a/libavcodec/vc1_loopfilter.c b/libavcodec/vc1_loopfilter.c +index ee694ede28..0f990cccef 100644 +--- a/libavcodec/vc1_loopfilter.c ++++ b/libavcodec/vc1_loopfilter.c +@@ -1125,7 +1125,10 @@ static av_always_inline void vc1_b_h_intfi_loop_filter(VC1Context *v, uint8_t *d + dst = dest + (block_num & 2) * 4 * s->linesize + (block_num & 1) * 8; + + if (!(flags & RIGHT_EDGE) || !(block_num & 5)) { +- v->vc1dsp.vc1_h_loop_filter8(dst + 8, linesize, pq); ++ if (block_num > 3) ++ v->vc1dsp.vc1_h_loop_filter8(dst + 8, linesize, pq); ++ else ++ v->vc1dsp.vc1_h_loop_filter8(dst + 8, linesize, pq); + } + + tt = ttblk[0] >> (block_num * 4) & 0xf; diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index d4ceb60791..fb7f839c5e 100644 --- a/libavcodec/vc1dec.c @@ -63577,6 +63710,68 @@ index 75db62b1b4..e192b431be 100644 } VC1DSPContext; void ff_vc1dsp_init(VC1DSPContext* c); +diff --git a/libavcodec/vc2enc.c b/libavcodec/vc2enc.c +index a55c14092f..295cc21dfa 100644 +--- a/libavcodec/vc2enc.c ++++ b/libavcodec/vc2enc.c +@@ -183,9 +183,7 @@ typedef struct VC2EncContext { + static av_always_inline void put_vc2_ue_uint(PutBitContext *pb, uint32_t val) + { + int i; +- int bits = 0; +- unsigned topbit = 1, maxval = 1; +- uint64_t 
pbits = 0; ++ int pbits = 0, bits = 0, topbit = 1, maxval = 1; + + if (!val++) { + put_bits(pb, 1, 1); +@@ -202,13 +200,12 @@ static av_always_inline void put_vc2_ue_uint(PutBitContext *pb, uint32_t val) + + for (i = 0; i < bits; i++) { + topbit >>= 1; +- av_assert2(pbits <= UINT64_MAX>>3); + pbits <<= 2; + if (val & topbit) + pbits |= 0x1; + } + +- put_bits64(pb, bits*2 + 1, (pbits << 1) | 1); ++ put_bits(pb, bits*2 + 1, (pbits << 1) | 1); + } + + static av_always_inline int count_vc2_ue_uint(uint32_t val) +diff --git a/libavcodec/vdpau_mpeg12.c b/libavcodec/vdpau_mpeg12.c +index 0860af815e..72220ffb4e 100644 +--- a/libavcodec/vdpau_mpeg12.c ++++ b/libavcodec/vdpau_mpeg12.c +@@ -73,9 +73,8 @@ static int vdpau_mpeg_start_frame(AVCodecContext *avctx, + info->f_code[1][0] = s->mpeg_f_code[1][0]; + info->f_code[1][1] = s->mpeg_f_code[1][1]; + for (i = 0; i < 64; ++i) { +- int n = s->idsp.idct_permutation[i]; +- info->intra_quantizer_matrix[i] = s->intra_matrix[n]; +- info->non_intra_quantizer_matrix[i] = s->inter_matrix[n]; ++ info->intra_quantizer_matrix[i] = s->intra_matrix[i]; ++ info->non_intra_quantizer_matrix[i] = s->inter_matrix[i]; + } + + return ff_vdpau_common_start_frame(pic_ctx, buffer, size); +diff --git a/libavcodec/vdpau_mpeg4.c b/libavcodec/vdpau_mpeg4.c +index 59cdb96378..93b25beb1f 100644 +--- a/libavcodec/vdpau_mpeg4.c ++++ b/libavcodec/vdpau_mpeg4.c +@@ -74,9 +74,8 @@ static int vdpau_mpeg4_start_frame(AVCodecContext *avctx, + info->alternate_vertical_scan_flag = s->alternate_scan; + info->top_field_first = s->top_field_first; + for (i = 0; i < 64; ++i) { +- int n = s->idsp.idct_permutation[i]; +- info->intra_quantizer_matrix[i] = s->intra_matrix[n]; +- info->non_intra_quantizer_matrix[i] = s->inter_matrix[n]; ++ info->intra_quantizer_matrix[i] = s->intra_matrix[i]; ++ info->non_intra_quantizer_matrix[i] = s->inter_matrix[i]; + } + + ff_vdpau_common_start_frame(pic_ctx, buffer, size); diff --git a/libavcodec/videodsp_template.c b/libavcodec/videodsp_template.c index 8743d725c6..55123a5844 100644 --- a/libavcodec/videodsp_template.c @@ -63599,53 +63794,55 @@ index 8743d725c6..55123a5844 100644 while (block_h--) { pixel *bufp = (pixel *) buf; -diff --git a/libavcodec/videotoolbox.c b/libavcodec/videotoolbox.c -index 2357401412..49e726a75f 100644 ---- a/libavcodec/videotoolbox.c -+++ b/libavcodec/videotoolbox.c -@@ -608,7 +608,8 @@ static void videotoolbox_decoder_callback(void *opaque, - CMTime pts, - CMTime duration) - { -- VTContext *vtctx = opaque; -+ AVCodecContext *avctx = opaque; -+ VTContext *vtctx = avctx->internal->hwaccel_priv_data; +diff --git a/libavcodec/vorbisdec.c b/libavcodec/vorbisdec.c +index 9d89726c09..169df591b3 100644 +--- a/libavcodec/vorbisdec.c ++++ b/libavcodec/vorbisdec.c +@@ -363,10 +363,6 @@ static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc) + unsigned codebook_value_bits = get_bits(gb, 4) + 1; + unsigned codebook_sequence_p = get_bits1(gb); - if (vtctx->frame) { - CVPixelBufferRelease(vtctx->frame); -@@ -616,8 +617,7 @@ static void videotoolbox_decoder_callback(void *opaque, - } +- if (!isfinite(codebook_minimum_value) || !isfinite(codebook_delta_value)) { +- ret = AVERROR_INVALIDDATA; +- goto error; +- } + ff_dlog(NULL, " We expect %d numbers for building the codevectors. 
\n", + codebook_lookup_values); + ff_dlog(NULL, " delta %f minmum %f \n", +@@ -1451,9 +1447,6 @@ static av_always_inline int vorbis_residue_decode_internal(vorbis_context *vc, + unsigned step = FASTDIV(vr->partition_size << 1, dim << 1); + vorbis_codebook codebook = vc->codebooks[vqbook]; - if (!image_buffer) { -- av_log(vtctx->logctx, AV_LOG_DEBUG, -- "vt decoder cb: output image buffer is null: %i\n", status); -+ av_log(avctx, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n"); - return; - } - -@@ -828,7 +828,7 @@ static int videotoolbox_start(AVCodecContext *avctx) - videotoolbox->cv_pix_fmt_type); - - decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback; -- decoder_cb.decompressionOutputRefCon = avctx->internal->hwaccel_priv_data; -+ decoder_cb.decompressionOutputRefCon = avctx; - - status = VTDecompressionSessionCreate(NULL, // allocator - videotoolbox->cm_fmt_desc, // videoFormatDescription -@@ -1040,8 +1040,6 @@ static int videotoolbox_common_init(AVCodecContext *avctx) - AVHWFramesContext *hw_frames; - int err; - -- vtctx->logctx = avctx; +- if (get_bits_left(gb) <= 0) +- return AVERROR_INVALIDDATA; - - // Old API - do nothing. - if (avctx->hwaccel_context) - return 0; + if (vr_type == 0) { + + voffs = voffset+j*vlen; diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c -index 90c889182c..57c6eb1ff9 100644 +index 3c5f8d710e..aa6d943822 100644 --- a/libavcodec/vp3.c +++ b/libavcodec/vp3.c -@@ -2683,27 +2683,15 @@ static int vp3_decode_frame(AVCodecContext *avctx, +@@ -2012,7 +2012,8 @@ static int vp4_mc_loop_filter(Vp3DecodeContext *s, int plane, int motion_x, int + x_offset = (-(x + 2) & 7) + 2; + y_offset = (-(y + 2) & 7) + 2; + +- av_assert1(!(x_offset > 8 + x_subpel && y_offset > 8 + y_subpel)); ++ if (x_offset > 8 + x_subpel && y_offset > 8 + y_subpel) ++ return 0; + + s->vdsp.emulated_edge_mc(loop, motion_source - stride - 1, + loop_stride, stride, +@@ -2344,8 +2345,6 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx) + s->avctx = avctx; + s->width = FFALIGN(avctx->coded_width, 16); + s->height = FFALIGN(avctx->coded_height, 16); +- if (s->width < 18) +- return AVERROR_PATCHWELCOME; + if (avctx->codec_id != AV_CODEC_ID_THEORA) + avctx->pix_fmt = AV_PIX_FMT_YUV420P; + avctx->chroma_sample_location = AVCHROMA_LOC_CENTER; +@@ -2684,13 +2683,8 @@ static int vp3_decode_frame(AVCodecContext *avctx, if ((ret = ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF)) < 0) goto error; @@ -63660,56 +63857,66 @@ index 90c889182c..57c6eb1ff9 100644 if (s->keyframe) { if (!s->theora) { - skip_bits(&gb, 4); /* width code */ - skip_bits(&gb, 4); /* height code */ - if (s->version) { -- int version = get_bits(&gb, 5); --#if !CONFIG_VP4_DECODER -- if (version >= 2) { -- av_log(avctx, AV_LOG_ERROR, "This build does not support decoding VP4.\n"); -- return AVERROR_DECODER_NOT_FOUND; -- } --#endif -- s->version = version; -+ s->version = get_bits(&gb, 5); - if (avctx->frame_number == 0) - av_log(s->avctx, AV_LOG_DEBUG, - "VP version: %d\n", s->version); -diff --git a/libavcodec/vqavideo.c b/libavcodec/vqavideo.c -index d0e1927444..f45390cfe5 100644 ---- a/libavcodec/vqavideo.c -+++ b/libavcodec/vqavideo.c -@@ -588,14 +588,13 @@ static int vqa_decode_chunk(VqaContext *s, AVFrame *frame) - if (s->partial_countdown <= 0) { - bytestream2_init(&s->gb, s->next_codebook_buffer, s->next_codebook_buffer_index); - /* decompress codebook */ -- res = decode_format80(s, s->next_codebook_buffer_index, -- s->codebook, s->codebook_size, 0); -+ if ((res = 
decode_format80(s, s->next_codebook_buffer_index, -+ s->codebook, s->codebook_size, 0)) < 0) -+ return res; +@@ -2923,9 +2917,7 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) + /* sanity check */ + if (av_image_check_size(visible_width, visible_height, 0, avctx) < 0 || + visible_width + offset_x > s->width || +- visible_height + offset_y > s->height || +- visible_width < 18 +- ) { ++ visible_height + offset_y > s->height) { + av_log(avctx, AV_LOG_ERROR, + "Invalid frame dimensions - w:%d h:%d x:%d y:%d (%dx%d).\n", + visible_width, visible_height, offset_x, offset_y, +@@ -2971,8 +2963,6 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb) + } else + avctx->pix_fmt = AV_PIX_FMT_YUV420P; - /* reset accounting */ - s->next_codebook_buffer_index = 0; - s->partial_countdown = s->partial_count; -- if (res < 0) -- return res; +- if (s->width < 18) +- return AVERROR_PATCHWELCOME; + ret = ff_set_dimensions(avctx, s->width, s->height); + if (ret < 0) + return ret; +diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c +index 06b38bc9c8..d16e7b6aa3 100644 +--- a/libavcodec/vp8.c ++++ b/libavcodec/vp8.c +@@ -239,16 +239,8 @@ int update_dimensions(VP8Context *s, int width, int height, int is_vp7) + return AVERROR(ENOMEM); } + #if HAVE_THREADS +- ret = pthread_mutex_init(&s->thread_data[i].lock, NULL); +- if (ret) { +- free_buffers(s); +- return AVERROR(ret); +- } +- ret = pthread_cond_init(&s->thread_data[i].cond, NULL); +- if (ret) { +- free_buffers(s); +- return AVERROR(ret); +- } ++ pthread_mutex_init(&s->thread_data[i].lock, NULL); ++ pthread_cond_init(&s->thread_data[i].cond, NULL); + #endif } -diff --git a/libavcodec/vt_internal.h b/libavcodec/vt_internal.h -index 08d9c77090..fb64735b8c 100644 ---- a/libavcodec/vt_internal.h -+++ b/libavcodec/vt_internal.h -@@ -42,8 +42,6 @@ typedef struct VTContext { - // Current H264 parameters (used to trigger decoder restart on SPS changes). - uint8_t sps[3]; - bool reconfig_needed; -- -- void *logctx; - } VTContext; +diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c +index 43b11ebede..4659f94ee8 100644 +--- a/libavcodec/vp9.c ++++ b/libavcodec/vp9.c +@@ -715,12 +715,6 @@ static int decode_frame_header(AVCodecContext *avctx, + s->s.h.segmentation.feat[i].skip_enabled = get_bits1(&s->gb); + } + } +- } else { +- // Reset fields under segmentation switch if segmentation is disabled. +- // This is necessary because some hwaccels don't ignore these fields +- // if segmentation is disabled. 
+- s->s.h.segmentation.temporal = 0; +- s->s.h.segmentation.update_map = 0; + } - int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame); + // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c index 4b865087bb..2d49172eaf 100644 --- a/libavcodec/wavpack.c @@ -63741,6 +63948,28 @@ index 4b865087bb..2d49172eaf 100644 while (DSD_BYTE_READY(high, low) && bytestream2_get_bytes_left(&s->gbyte)) { value = (value << 8) | bytestream2_get_byte(&s->gbyte); high = (high << 8) | 0xff; +diff --git a/libavcodec/wavpackenc.c b/libavcodec/wavpackenc.c +index 4dca7728d0..0a798438bc 100644 +--- a/libavcodec/wavpackenc.c ++++ b/libavcodec/wavpackenc.c +@@ -1976,7 +1976,7 @@ static void encode_flush(WavPackEncodeContext *s) + put_bits(pb, 31, 0x7FFFFFFF); + cbits -= 31; + } else { +- put_bits(pb, cbits, (1U << cbits) - 1); ++ put_bits(pb, cbits, (1 << cbits) - 1); + cbits = 0; + } + } while (cbits); +@@ -2005,7 +2005,7 @@ static void encode_flush(WavPackEncodeContext *s) + put_bits(pb, 31, 0x7FFFFFFF); + cbits -= 31; + } else { +- put_bits(pb, cbits, (1U << cbits) - 1); ++ put_bits(pb, cbits, (1 << cbits) - 1); + cbits = 0; + } + } while (cbits); diff --git a/libavcodec/weak_link.c b/libavcodec/weak_link.c new file mode 100644 index 0000000000..f234a985b9 @@ -63878,58 +64107,79 @@ index 0000000000..415b6a27a0 + + + -diff --git a/libavcodec/wmadec.c b/libavcodec/wmadec.c -index f5408a1789..8710414936 100644 ---- a/libavcodec/wmadec.c -+++ b/libavcodec/wmadec.c -@@ -980,7 +980,6 @@ AVCodec ff_wmav1_decoder = { - .capabilities = AV_CODEC_CAP_DR1, - .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP, - AV_SAMPLE_FMT_NONE }, -- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, - }; - #endif - #if CONFIG_WMAV2_DECODER -@@ -997,6 +996,5 @@ AVCodec ff_wmav2_decoder = { - .capabilities = AV_CODEC_CAP_DR1, - .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP, - AV_SAMPLE_FMT_NONE }, -- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, - }; - #endif -diff --git a/libavcodec/wmaenc.c b/libavcodec/wmaenc.c -index a28a0c387b..6a7e23d016 100644 ---- a/libavcodec/wmaenc.c -+++ b/libavcodec/wmaenc.c -@@ -436,7 +436,6 @@ AVCodec ff_wmav1_encoder = { - .close = ff_wma_end, - .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP, - AV_SAMPLE_FMT_NONE }, -- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, - }; - #endif - #if CONFIG_WMAV2_ENCODER -@@ -451,6 +450,5 @@ AVCodec ff_wmav2_encoder = { - .close = ff_wma_end, - .sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP, - AV_SAMPLE_FMT_NONE }, -- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, - }; - #endif -diff --git a/libavcodec/wnv1.c b/libavcodec/wnv1.c -index fd9721f4ca..dcf417763c 100644 ---- a/libavcodec/wnv1.c -+++ b/libavcodec/wnv1.c -@@ -126,9 +126,6 @@ static av_cold int decode_init(AVCodecContext *avctx) +diff --git a/libavcodec/x86/mathops.h b/libavcodec/x86/mathops.h +index ca7e2dffc1..6298f5ed19 100644 +--- a/libavcodec/x86/mathops.h ++++ b/libavcodec/x86/mathops.h +@@ -35,20 +35,12 @@ + static av_always_inline av_const int MULL(int a, int b, unsigned shift) { - static AVOnce init_static_once = AV_ONCE_INIT; + int rt, dummy; +- if (__builtin_constant_p(shift)) + __asm__ ( + "imull %3 \n\t" + "shrdl %4, %%edx, %%eax \n\t" + :"=a"(rt), "=d"(dummy) +- :"a"(a), "rm"(b), "i"(shift & 0x1F) ++ :"a"(a), "rm"(b), "ci"((uint8_t)shift) + ); +- else +- __asm__ ( +- "imull %3 \n\t" +- "shrdl %4, %%edx, %%eax \n\t" +- :"=a"(rt), "=d"(dummy) +- :"a"(a), 
"rm"(b), "c"((uint8_t)shift) +- ); + return rt; + } -- if (avctx->width <= 1) -- return AVERROR_INVALIDDATA; -- - avctx->pix_fmt = AV_PIX_FMT_YUV422P; +@@ -121,31 +113,19 @@ __asm__ volatile(\ + // avoid +32 for shift optimization (gcc should do that ...) + #define NEG_SSR32 NEG_SSR32 + static inline int32_t NEG_SSR32( int32_t a, int8_t s){ +- if (__builtin_constant_p(s)) + __asm__ ("sarl %1, %0\n\t" + : "+r" (a) +- : "i" (-s & 0x1F) ++ : "ic" ((uint8_t)(-s)) + ); +- else +- __asm__ ("sarl %1, %0\n\t" +- : "+r" (a) +- : "c" ((uint8_t)(-s)) +- ); + return a; + } + + #define NEG_USR32 NEG_USR32 + static inline uint32_t NEG_USR32(uint32_t a, int8_t s){ +- if (__builtin_constant_p(s)) + __asm__ ("shrl %1, %0\n\t" + : "+r" (a) +- : "i" (-s & 0x1F) ++ : "ic" ((uint8_t)(-s)) + ); +- else +- __asm__ ("shrl %1, %0\n\t" +- : "+r" (a) +- : "c" ((uint8_t)(-s)) +- ); + return a; + } + +diff --git a/libavcodec/x86/vp3dsp_init.c b/libavcodec/x86/vp3dsp_init.c +index d23420c89b..ba47e1c6cd 100644 +--- a/libavcodec/x86/vp3dsp_init.c ++++ b/libavcodec/x86/vp3dsp_init.c +@@ -60,7 +60,7 @@ av_cold void ff_vp3dsp_init_x86(VP3DSPContext *c, int flags) + + if (!(flags & AV_CODEC_FLAG_BITEXACT)) { + c->v_loop_filter = c->v_loop_filter_unaligned = ff_vp3_v_loop_filter_mmxext; +- c->h_loop_filter = c->h_loop_filter_unaligned = ff_vp3_h_loop_filter_mmxext; ++ c->h_loop_filter = c->v_loop_filter_unaligned = ff_vp3_h_loop_filter_mmxext; + } + } - ff_thread_once(&init_static_once, wnv1_init_static); diff --git a/libavcodec/xpmdec.c b/libavcodec/xpmdec.c index 6db95285ce..993873c595 100644 --- a/libavcodec/xpmdec.c @@ -63944,52 +64194,116 @@ index 6db95285ce..993873c595 100644 size *= 4; ptr += mod_strcspn(ptr, ",") + 1; -diff --git a/libavcodec/zmbvenc.c b/libavcodec/zmbvenc.c -index d050cc2ef0..319381dd48 100644 ---- a/libavcodec/zmbvenc.c -+++ b/libavcodec/zmbvenc.c -@@ -73,7 +73,6 @@ typedef struct ZmbvEncContext { - int keyint, curfrm; - int bypp; - enum ZmbvFormat fmt; -- int zlib_init_ok; - z_stream zstream; +diff --git a/libavcodec/xsubdec.c b/libavcodec/xsubdec.c +index 3be7393651..87ac910577 100644 +--- a/libavcodec/xsubdec.c ++++ b/libavcodec/xsubdec.c +@@ -58,7 +58,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_sub_ptr, + int64_t packet_time = 0; + GetBitContext gb; + int has_alpha = avctx->codec_tag == MKTAG('D','X','S','A'); +- int64_t start_display_time, end_display_time; - int score_tab[ZMBV_BLOCK * ZMBV_BLOCK * 4 + 1]; -@@ -311,9 +310,8 @@ static av_cold int encode_end(AVCodecContext *avctx) - av_freep(&c->comp_buf); - av_freep(&c->work_buf); - -+ deflateEnd(&c->zstream); - av_freep(&c->prev_buf); -- if (c->zlib_init_ok) -- deflateEnd(&c->zstream); - - return 0; - } -@@ -383,6 +381,8 @@ static av_cold int encode_init(AVCodecContext *avctx) - return AVERROR(EINVAL); + // check that at least header fits + if (buf_size < 27 + 7 * 2 + 4 * (3 + has_alpha)) { +@@ -73,14 +72,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_sub_ptr, } + if (avpkt->pts != AV_NOPTS_VALUE) + packet_time = av_rescale_q(avpkt->pts, AV_TIME_BASE_Q, (AVRational){1, 1000}); +- +- sub->start_display_time = start_display_time = parse_timecode(buf + 1, packet_time); +- sub->end_display_time = end_display_time = parse_timecode(buf + 14, packet_time); +- if (sub->start_display_time != start_display_time || +- sub-> end_display_time != end_display_time) { +- av_log(avctx, AV_LOG_ERROR, "time code not representable in 32bit\n"); +- return -1; +- } ++ sub->start_display_time = 
parse_timecode(buf + 1, packet_time); ++ sub->end_display_time = parse_timecode(buf + 14, packet_time); + buf += 27; -+ // Needed if zlib unused or init aborted before deflateInit -+ memset(&c->zstream, 0, sizeof(z_stream)); - c->comp_size = avctx->width * c->bypp * avctx->height + 1024 + - ((avctx->width + ZMBV_BLOCK - 1) / ZMBV_BLOCK) * ((avctx->height + ZMBV_BLOCK - 1) / ZMBV_BLOCK) * 2 + 4; - if (!(c->work_buf = av_malloc(c->comp_size))) { -@@ -424,7 +424,6 @@ static av_cold int encode_init(AVCodecContext *avctx) - av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret); - return -1; - } -- c->zlib_init_ok = 1; + // read header +diff --git a/libavcodec/xvididct.c b/libavcodec/xvididct.c +index ced8c7235a..360deb3244 100644 +--- a/libavcodec/xvididct.c ++++ b/libavcodec/xvididct.c +@@ -56,37 +56,37 @@ static const int TAB35[] = { 26722, 25172, 22654, 19266, 15137, 10426, 5315 }; - return 0; - } -@@ -446,5 +445,4 @@ AVCodec ff_zmbv_encoder = { - #endif //ZMBV_ENABLE_24BPP - AV_PIX_FMT_BGR0, - AV_PIX_FMT_NONE }, -- .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, - }; + static int idct_row(short *in, const int *const tab, int rnd) + { +- const unsigned c1 = tab[0]; +- const unsigned c2 = tab[1]; +- const unsigned c3 = tab[2]; +- const unsigned c4 = tab[3]; +- const unsigned c5 = tab[4]; +- const unsigned c6 = tab[5]; +- const unsigned c7 = tab[6]; ++ const int c1 = tab[0]; ++ const int c2 = tab[1]; ++ const int c3 = tab[2]; ++ const int c4 = tab[3]; ++ const int c5 = tab[4]; ++ const int c6 = tab[5]; ++ const int c7 = tab[6]; + + const int right = in[5] | in[6] | in[7]; + const int left = in[1] | in[2] | in[3]; + if (!(right | in[4])) { + const int k = c4 * in[0] + rnd; + if (left) { +- const unsigned a0 = k + c2 * in[2]; +- const unsigned a1 = k + c6 * in[2]; +- const unsigned a2 = k - c6 * in[2]; +- const unsigned a3 = k - c2 * in[2]; ++ const int a0 = k + c2 * in[2]; ++ const int a1 = k + c6 * in[2]; ++ const int a2 = k - c6 * in[2]; ++ const int a3 = k - c2 * in[2]; + + const int b0 = c1 * in[1] + c3 * in[3]; + const int b1 = c3 * in[1] - c7 * in[3]; + const int b2 = c5 * in[1] - c1 * in[3]; + const int b3 = c7 * in[1] - c5 * in[3]; + +- in[0] = (int)(a0 + b0) >> ROW_SHIFT; +- in[1] = (int)(a1 + b1) >> ROW_SHIFT; +- in[2] = (int)(a2 + b2) >> ROW_SHIFT; +- in[3] = (int)(a3 + b3) >> ROW_SHIFT; +- in[4] = (int)(a3 - b3) >> ROW_SHIFT; +- in[5] = (int)(a2 - b2) >> ROW_SHIFT; +- in[6] = (int)(a1 - b1) >> ROW_SHIFT; +- in[7] = (int)(a0 - b0) >> ROW_SHIFT; ++ in[0] = (a0 + b0) >> ROW_SHIFT; ++ in[1] = (a1 + b1) >> ROW_SHIFT; ++ in[2] = (a2 + b2) >> ROW_SHIFT; ++ in[3] = (a3 + b3) >> ROW_SHIFT; ++ in[4] = (a3 - b3) >> ROW_SHIFT; ++ in[5] = (a2 - b2) >> ROW_SHIFT; ++ in[6] = (a1 - b1) >> ROW_SHIFT; ++ in[7] = (a0 - b0) >> ROW_SHIFT; + } else { + const int a0 = k >> ROW_SHIFT; + if (a0) { +@@ -102,8 +102,8 @@ static int idct_row(short *in, const int *const tab, int rnd) + return 0; + } + } else if (!(left | right)) { +- const int a0 = (int)(rnd + c4 * (in[0] + in[4])) >> ROW_SHIFT; +- const int a1 = (int)(rnd + c4 * (in[0] - in[4])) >> ROW_SHIFT; ++ const int a0 = (rnd + c4 * (in[0] + in[4])) >> ROW_SHIFT; ++ const int a1 = (rnd + c4 * (in[0] - in[4])) >> ROW_SHIFT; + + in[0] = a0; + in[3] = a0; +@@ -114,7 +114,7 @@ static int idct_row(short *in, const int *const tab, int rnd) + in[5] = a1; + in[6] = a1; + } else { +- const unsigned int k = c4 * in[0] + rnd; ++ const int k = c4 * in[0] + rnd; + const unsigned int a0 = k + c2 * in[2] + c4 * in[4] + c6 * in[6]; + const unsigned int a1 = k + c6 * 
in[2] - c4 * in[4] - c2 * in[6]; + const unsigned int a2 = k - c6 * in[2] - c4 * in[4] + c2 * in[6]; diff --git a/libavdevice/Makefile b/libavdevice/Makefile index 0dfe47a1f4..ec7c7b4147 100644 --- a/libavdevice/Makefile @@ -64704,6 +65018,69 @@ index 0000000000..c7b90e6dd8 + .deinit = drm_vout_deinit, +}; + +diff --git a/libavdevice/dshow.c b/libavdevice/dshow.c +index c0d8b12d5e..73a9a48b20 100644 +--- a/libavdevice/dshow.c ++++ b/libavdevice/dshow.c +@@ -778,10 +778,10 @@ dshow_open_device(AVFormatContext *avctx, ICreateDevEnum *devenum, + goto error; + } + } +- if (ctx->device_filter[otherDevType]) { ++ if (ctx->device_filter[otherDevType]) { + // avoid adding add two instances of the same device to the graph, one for video, one for audio + // a few devices don't support this (could also do this check earlier to avoid double crossbars, etc. but they seem OK) +- if (!device_filter_unique_name || strcmp(device_filter_unique_name, ctx->device_unique_name[otherDevType]) == 0) { ++ if (strcmp(device_filter_unique_name, ctx->device_unique_name[otherDevType]) == 0) { + av_log(avctx, AV_LOG_DEBUG, "reusing previous graph capture filter... %s\n", device_filter_unique_name); + IBaseFilter_Release(device_filter); + device_filter = ctx->device_filter[otherDevType]; +@@ -873,7 +873,7 @@ dshow_open_device(AVFormatContext *avctx, ICreateDevEnum *devenum, + av_log(avctx, AV_LOG_ERROR, "Could not create CaptureGraphBuilder2\n"); + goto error; + } +- r = ICaptureGraphBuilder2_SetFiltergraph(graph_builder2, graph); ++ ICaptureGraphBuilder2_SetFiltergraph(graph_builder2, graph); + if (r != S_OK) { + av_log(avctx, AV_LOG_ERROR, "Could not set graph for CaptureGraphBuilder2\n"); + goto error; +diff --git a/libavdevice/dshow_capture.h b/libavdevice/dshow_capture.h +index f2e35bd600..06ded2ba96 100644 +--- a/libavdevice/dshow_capture.h ++++ b/libavdevice/dshow_capture.h +@@ -125,15 +125,14 @@ void ff_dshow_##prefix##_Destroy(class *this) \ + class *ff_dshow_##prefix##_Create(__VA_ARGS__) \ + { \ + class *this = CoTaskMemAlloc(sizeof(class)); \ ++ void *vtbl = CoTaskMemAlloc(sizeof(*this->vtbl)); \ + dshowdebug("ff_dshow_"AV_STRINGIFY(prefix)"_Create(%p)\n", this); \ +- if (!this) \ ++ if (!this || !vtbl) \ + goto fail; \ + ZeroMemory(this, sizeof(class)); \ +- this->vtbl = CoTaskMemAlloc(sizeof(*this->vtbl)); \ +- if (!this->vtbl) \ +- goto fail; \ +- ZeroMemory(this->vtbl, sizeof(*this->vtbl)); \ ++ ZeroMemory(vtbl, sizeof(*this->vtbl)); \ + this->ref = 1; \ ++ this->vtbl = vtbl; \ + if (!setup) \ + goto fail; \ + dshowdebug("created ff_dshow_"AV_STRINGIFY(prefix)" %p\n", this); \ +diff --git a/libavdevice/dshow_filter.c b/libavdevice/dshow_filter.c +index 4295f42aa2..61e057a836 100644 +--- a/libavdevice/dshow_filter.c ++++ b/libavdevice/dshow_filter.c +@@ -135,7 +135,7 @@ long ff_dshow_filter_JoinFilterGraph(DShowFilter *this, IFilterGraph *graph, + + this->info.pGraph = graph; + if (name) +- wcscpy_s(this->info.achName, sizeof(this->info.achName) / sizeof(wchar_t), name); ++ wcscpy(this->info.achName, name); + + return S_OK; + } diff --git a/libavdevice/egl_vout.c b/libavdevice/egl_vout.c new file mode 100644 index 0000000000..cc6e310551 @@ -66038,19 +66415,40 @@ index 0000000000..84723a34ad + .init = rpi_vout_init, + .deinit = rpi_vout_deinit, +}; -diff --git a/libavdevice/xv.c b/libavdevice/xv.c -index 33507291d2..50dc4e0d04 100644 ---- a/libavdevice/xv.c -+++ b/libavdevice/xv.c -@@ -296,7 +296,7 @@ static int write_picture(AVFormatContext *s, uint8_t *input_data[4], - { - XVContext *xv = s->priv_data; - 
XvImage *img = xv->yuv_image; -- uint8_t *data[4] = { -+ uint8_t *data[3] = { - img->data + img->offsets[0], - img->data + img->offsets[1], - img->data + img->offsets[2] +diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c +index 1dcbe04bb1..365bacd771 100644 +--- a/libavdevice/v4l2.c ++++ b/libavdevice/v4l2.c +@@ -95,10 +95,10 @@ struct video_data { + int (*open_f)(const char *file, int oflag, ...); + int (*close_f)(int fd); + int (*dup_f)(int fd); +-#if defined(__sun) || defined(__BIONIC__) || defined(__musl__) /* POSIX-like */ +- int (*ioctl_f)(int fd, int request, ...); +-#else ++#ifdef __GLIBC__ + int (*ioctl_f)(int fd, unsigned long int request, ...); ++#else ++ int (*ioctl_f)(int fd, int request, ...); + #endif + ssize_t (*read_f)(int fd, void *buffer, size_t n); + void *(*mmap_f)(void *start, size_t length, int prot, int flags, int fd, int64_t offset); +diff --git a/libavdevice/xcbgrab.c b/libavdevice/xcbgrab.c +index b4968e42ff..8e3292e577 100644 +--- a/libavdevice/xcbgrab.c ++++ b/libavdevice/xcbgrab.c +@@ -826,10 +826,7 @@ static av_cold int xcbgrab_read_header(AVFormatContext *s) + + if (!sscanf(s->url, "%[^+]+%d,%d", display_name, &c->x, &c->y)) { + *display_name = 0; +- if(sscanf(s->url, "+%d,%d", &c->x, &c->y) != 2) { +- if (*s->url) +- av_log(s, AV_LOG_WARNING, "Ambigous URL: %s\n", s->url); +- } ++ sscanf(s->url, "+%d,%d", &c->x, &c->y); + } + + c->conn = xcb_connect(display_name[0] ? display_name : NULL, &screen_num); diff --git a/libavfilter/Makefile b/libavfilter/Makefile index b2c254ea67..144fbda652 100644 --- a/libavfilter/Makefile @@ -66071,76 +66469,84 @@ index b2c254ea67..144fbda652 100644 OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o OBJS-$(CONFIG_UNSHARP_OPENCL_FILTER) += vf_unsharp_opencl.o opencl.o \ opencl/unsharp.o -diff --git a/libavfilter/aeval.c b/libavfilter/aeval.c -index 7636063bcf..d5437431ab 100644 ---- a/libavfilter/aeval.c -+++ b/libavfilter/aeval.c -@@ -124,10 +124,11 @@ static int parse_channel_expressions(AVFilterContext *ctx, - } - - #define ADD_EXPRESSION(expr_) do { \ -- ret = av_dynarray_add_nofree(&eval->expr, \ -- &eval->nb_channels, NULL); \ -- if (ret < 0) \ -+ if (!av_dynarray2_add((void **)&eval->expr, &eval->nb_channels, \ -+ sizeof(*eval->expr), NULL)) { \ -+ ret = AVERROR(ENOMEM); \ - goto end; \ -+ } \ - eval->expr[eval->nb_channels-1] = NULL; \ - ret = av_expr_parse(&eval->expr[eval->nb_channels - 1], expr_, \ - var_names, func1_names, func1, \ -diff --git a/libavfilter/af_surround.c b/libavfilter/af_surround.c -index c0b8b002c2..d18b3146e7 100644 ---- a/libavfilter/af_surround.c -+++ b/libavfilter/af_surround.c -@@ -203,13 +203,13 @@ static int config_input(AVFilterLink *inlink) - s->rdft = av_calloc(inlink->channels, sizeof(*s->rdft)); - if (!s->rdft) - return AVERROR(ENOMEM); -- s->nb_in_channels = inlink->channels; - - for (ch = 0; ch < inlink->channels; ch++) { - s->rdft[ch] = av_rdft_init(ff_log2(s->buf_size), DFT_R2C); - if (!s->rdft[ch]) +diff --git a/libavfilter/af_aderivative.c b/libavfilter/af_aderivative.c +index 56a59d517e..a591515cbf 100644 +--- a/libavfilter/af_aderivative.c ++++ b/libavfilter/af_aderivative.c +@@ -150,7 +150,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) + s->prev = ff_get_audio_buffer(inlink, 1); + if (!s->prev) { + av_frame_free(&in); +- av_frame_free(&out); return AVERROR(ENOMEM); + } } -+ s->nb_in_channels = inlink->channels; - s->input_levels = av_malloc_array(s->nb_in_channels, sizeof(*s->input_levels)); - if (!s->input_levels) - return AVERROR(ENOMEM); -@@ -266,13 +266,13 @@ 
static int config_output(AVFilterLink *outlink) - s->irdft = av_calloc(outlink->channels, sizeof(*s->irdft)); - if (!s->irdft) - return AVERROR(ENOMEM); -- s->nb_out_channels = outlink->channels; +diff --git a/libavfilter/af_alimiter.c b/libavfilter/af_alimiter.c +index f941768848..c41e95576f 100644 +--- a/libavfilter/af_alimiter.c ++++ b/libavfilter/af_alimiter.c +@@ -176,11 +176,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) + } else { + for (i = s->nextiter; i < s->nextiter + s->nextlen; i++) { + int j = i % buffer_size; +- double ppeak = 0, pdelta; ++ double ppeak, pdelta; - for (ch = 0; ch < outlink->channels; ch++) { - s->irdft[ch] = av_rdft_init(ff_log2(s->buf_size), IDFT_C2R); - if (!s->irdft[ch]) - return AVERROR(ENOMEM); - } -+ s->nb_out_channels = outlink->channels; - s->output_levels = av_malloc_array(s->nb_out_channels, sizeof(*s->output_levels)); - if (!s->output_levels) - return AVERROR(ENOMEM); -diff --git a/libavfilter/af_vibrato.c b/libavfilter/af_vibrato.c -index 64d6068b39..5db1f0f6c9 100644 ---- a/libavfilter/af_vibrato.c -+++ b/libavfilter/af_vibrato.c -@@ -157,11 +157,11 @@ static int config_input(AVFilterLink *inlink) - int c; - AVFilterContext *ctx = inlink->dst; - VibratoContext *s = ctx->priv; -+ s->channels = inlink->channels; +- if (nextpos[j] >= 0) +- ppeak = fabs(buffer[nextpos[j]]) > fabs(buffer[nextpos[j] + 1]) ? +- fabs(buffer[nextpos[j]]) : fabs(buffer[nextpos[j] + 1]); ++ ppeak = fabs(buffer[nextpos[j]]) > fabs(buffer[nextpos[j] + 1]) ? ++ fabs(buffer[nextpos[j]]) : fabs(buffer[nextpos[j] + 1]); + pdelta = (limit / peak - limit / ppeak) / (((buffer_size - nextpos[j] + s->pos) % buffer_size) / channels); + if (pdelta < nextdelta[j]) { + nextdelta[j] = pdelta; +diff --git a/libavfilter/af_mcompand.c b/libavfilter/af_mcompand.c +index 9ac1940a52..ce4f366ad7 100644 +--- a/libavfilter/af_mcompand.c ++++ b/libavfilter/af_mcompand.c +@@ -445,8 +445,8 @@ static int config_output(AVFilterLink *outlink) + } - s->buf = av_calloc(inlink->channels, sizeof(*s->buf)); - if (!s->buf) + new_nb_items += sscanf(tstr2, "%lf", &s->bands[i].topfreq) == 1; +- if (s->bands[i].topfreq < 0 || s->bands[i].topfreq >= outlink->sample_rate / 2.0) { +- av_log(ctx, AV_LOG_ERROR, "crossover_frequency: %f, should be >=0 and lower than half of sample rate: %f.\n", s->bands[i].topfreq, outlink->sample_rate / 2.0); ++ if (s->bands[i].topfreq < 0 || s->bands[i].topfreq >= outlink->sample_rate / 2) { ++ av_log(ctx, AV_LOG_ERROR, "crossover_frequency: %f, should be >=0 and lower than half of sample rate: %d.\n", s->bands[i].topfreq, outlink->sample_rate / 2); + return AVERROR(EINVAL); + } + +diff --git a/libavfilter/af_pan.c b/libavfilter/af_pan.c +index a29d12d2b1..b628177071 100644 +--- a/libavfilter/af_pan.c ++++ b/libavfilter/af_pan.c +@@ -126,14 +126,6 @@ static av_cold int init(AVFilterContext *ctx) + if (ret < 0) + goto fail; + +- if (pan->nb_output_channels > MAX_CHANNELS) { +- av_log(ctx, AV_LOG_ERROR, +- "af_pan supports a maximum of %d channels. 
" +- "Feel free to ask for a higher limit.\n", MAX_CHANNELS); +- ret = AVERROR_PATCHWELCOME; +- goto fail; +- } +- + /* parse channel specifications */ + while ((arg = arg0 = av_strtok(NULL, "|", &tokenizer))) { + int used_in_ch[MAX_CHANNELS] = {0}; +diff --git a/libavfilter/af_stereowiden.c b/libavfilter/af_stereowiden.c +index 8ce2dd02d3..251f08438e 100644 +--- a/libavfilter/af_stereowiden.c ++++ b/libavfilter/af_stereowiden.c +@@ -75,8 +75,6 @@ static int config_input(AVFilterLink *inlink) + + s->length = s->delay * inlink->sample_rate / 1000; + s->length *= 2; +- if (s->length == 0) +- return AVERROR(EINVAL); + s->buffer = av_calloc(s->length, sizeof(*s->buffer)); + if (!s->buffer) return AVERROR(ENOMEM); -- s->channels = inlink->channels; - s->buf_size = lrint(inlink->sample_rate * 0.005 + 0.5); - for (c = 0; c < s->channels; c++) { - s->buf[c] = av_malloc_array(s->buf_size, sizeof(*s->buf[c])); diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index 0872c6e0f2..1dd05e4d75 100644 --- a/libavfilter/allfilters.c @@ -66169,49 +66575,6 @@ index 0872c6e0f2..1dd05e4d75 100644 extern AVFilter ff_vf_unsharp; extern AVFilter ff_vf_unsharp_opencl; extern AVFilter ff_vf_untile; -diff --git a/libavfilter/asrc_flite.c b/libavfilter/asrc_flite.c -index 6373ae761d..3e543a3ab6 100644 ---- a/libavfilter/asrc_flite.c -+++ b/libavfilter/asrc_flite.c -@@ -196,12 +196,10 @@ static av_cold void uninit(AVFilterContext *ctx) - { - FliteContext *flite = ctx->priv; - -- if (flite->voice_entry) { -- if (!--flite->voice_entry->usage_count) { -- flite->voice_entry->unregister_fn(flite->voice); -- flite->voice_entry->voice = NULL; -- } -- } -+ if (!--flite->voice_entry->usage_count) -+ flite->voice_entry->unregister_fn(flite->voice); -+ flite->voice = NULL; -+ flite->voice_entry = NULL; - delete_wave(flite->wave); - flite->wave = NULL; - } -diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c -index 6a344282eb..22ecad5f77 100644 ---- a/libavfilter/avfilter.c -+++ b/libavfilter/avfilter.c -@@ -925,8 +925,6 @@ int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options) - ret = ctx->filter->init(ctx); - else if (ctx->filter->init_dict) - ret = ctx->filter->init_dict(ctx, options); -- if (ret < 0) -- return ret; - - if (ctx->enable_str) { - ret = set_enable_expr(ctx, ctx->enable_str); -@@ -934,7 +932,7 @@ int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options) - return ret; - } - -- return 0; -+ return ret; - } - - int avfilter_init_str(AVFilterContext *filter, const char *args) diff --git a/libavfilter/avfiltergraph.c b/libavfilter/avfiltergraph.c index f6b572b3de..44fe8b679c 100644 --- a/libavfilter/avfiltergraph.c @@ -66416,6 +66779,176 @@ index da1cf9941e..c588ed23cb 100644 frame->format, frame->pts); break; case AVMEDIA_TYPE_AUDIO: +diff --git a/libavfilter/scale_eval.c b/libavfilter/scale_eval.c +index ea71260dcb..dfec081e15 100644 +--- a/libavfilter/scale_eval.c ++++ b/libavfilter/scale_eval.c +@@ -114,7 +114,7 @@ int ff_scale_adjust_dimensions(AVFilterLink *inlink, + int *ret_w, int *ret_h, + int force_original_aspect_ratio, int force_divisible_by) + { +- int64_t w, h; ++ int w, h; + int factor_w, factor_h; + + w = *ret_w; +@@ -148,8 +148,8 @@ int ff_scale_adjust_dimensions(AVFilterLink *inlink, + * dimensions so that it is not divisible by the set factors anymore + * unless force_divisible_by is defined as well */ + if (force_original_aspect_ratio) { +- int64_t tmp_w = av_rescale(h, inlink->w, inlink->h); +- int64_t tmp_h = av_rescale(w, inlink->h, inlink->w); ++ 
int tmp_w = av_rescale(h, inlink->w, inlink->h); ++ int tmp_h = av_rescale(w, inlink->h, inlink->w); + + if (force_original_aspect_ratio == 1) { + w = FFMIN(tmp_w, w); +@@ -170,9 +170,6 @@ int ff_scale_adjust_dimensions(AVFilterLink *inlink, + } + } + +- if ((int32_t)w != w || (int32_t)h != h) +- return AVERROR(EINVAL); +- + *ret_w = w; + *ret_h = h; + +diff --git a/libavfilter/scale_eval.h b/libavfilter/scale_eval.h +index 658092962d..fceb023fec 100644 +--- a/libavfilter/scale_eval.h ++++ b/libavfilter/scale_eval.h +@@ -40,7 +40,7 @@ int ff_scale_eval_dimensions(void *ctx, + * or both of the evaluated values are of the form '-n' or if + * force_original_aspect_ratio is set. + * +- * Returns negative error code on error or non negative on success ++ * Returns 0. + */ + int ff_scale_adjust_dimensions(AVFilterLink *inlink, + int *ret_w, int *ret_h, +diff --git a/libavfilter/signature_lookup.c b/libavfilter/signature_lookup.c +index ba0dcfbf34..272c717c77 100644 +--- a/libavfilter/signature_lookup.c ++++ b/libavfilter/signature_lookup.c +@@ -37,16 +37,6 @@ + #define STATUS_END_REACHED 1 + #define STATUS_BEGIN_REACHED 2 + +-static void sll_free(MatchingInfo **sll) +-{ +- while (*sll) { +- MatchingInfo *tmp = *sll; +- *sll = tmp->next; +- tmp->next = NULL; +- av_free(tmp); +- } +-} +- + static void fill_l1distlut(uint8_t lut[]) + { + int i, j, tmp_i, tmp_j,count; +@@ -299,11 +289,6 @@ static MatchingInfo* get_matching_parameters(AVFilterContext *ctx, SignatureCont + if (!c->next) + av_log(ctx, AV_LOG_FATAL, "Could not allocate memory"); + c = c->next; +- +- } +- if (!c) { +- sll_free(&cands); +- goto error; + } + c->framerateratio = (i+1.0) / 30; + c->score = hspace[i][j].score; +@@ -320,7 +305,6 @@ static MatchingInfo* get_matching_parameters(AVFilterContext *ctx, SignatureCont + } + } + } +- error: + for (i = 0; i < MAX_FRAMERATE; i++) { + av_freep(&hspace[i]); + } +@@ -453,14 +437,14 @@ static MatchingInfo evaluate_parameters(AVFilterContext *ctx, SignatureContext * + } + + if (tolerancecount > 2) { ++ a = aprev; ++ b = bprev; + if (dir == DIR_NEXT) { + /* turn around */ + a = infos->first; + b = infos->second; + dir = DIR_PREV; + } else { +- a = aprev; +- b = bprev; + break; + } + } +@@ -501,10 +485,10 @@ static MatchingInfo evaluate_parameters(AVFilterContext *ctx, SignatureContext * + continue; /* matching sequence is too short */ + if ((double) goodfcount / (double) fcount < sc->thit) + continue; +- if ((double) goodfcount*0.5 <= FFMAX(gooda, goodb)) ++ if ((double) goodfcount*0.5 < FFMAX(gooda, goodb)) + continue; + +- meandist = (double) distsum / (double) goodfcount; ++ meandist = (double) goodfcount / (double) distsum; + + if (meandist < minmeandist || + status == STATUS_END_REACHED | STATUS_BEGIN_REACHED || +@@ -536,6 +520,16 @@ static MatchingInfo evaluate_parameters(AVFilterContext *ctx, SignatureContext * + return bestmatch; + } + ++static void sll_free(MatchingInfo *sll) ++{ ++ void *tmp; ++ while (sll) { ++ tmp = sll; ++ sll = sll->next; ++ av_freep(&tmp); ++ } ++} ++ + static MatchingInfo lookup_signatures(AVFilterContext *ctx, SignatureContext *sc, StreamContext *first, StreamContext *second, int mode) + { + CoarseSignature *cs, *cs2; +@@ -578,7 +572,7 @@ static MatchingInfo lookup_signatures(AVFilterContext *ctx, SignatureContext *sc + "ratio %f, offset %d, score %d, %d frames matching\n", + bestmatch.first->index, bestmatch.second->index, + bestmatch.framerateratio, bestmatch.offset, bestmatch.score, bestmatch.matchframes); +- sll_free(&infos); ++ sll_free(infos); + } + } 
while (find_next_coarsecandidate(sc, second->coarsesiglist, &cs, &cs2, 0) && !bestmatch.whole); + return bestmatch; +diff --git a/libavfilter/vf_avgblur.c b/libavfilter/vf_avgblur.c +index 070500c37a..7fd65eabfc 100644 +--- a/libavfilter/vf_avgblur.c ++++ b/libavfilter/vf_avgblur.c +@@ -273,7 +273,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) + const int width = s->planewidth[plane]; + + if (!(s->planes & (1 << plane))) { +- if (out->data[plane] != in->data[plane]) ++ if (out != in) + av_image_copy_plane(out->data[plane], out->linesize[plane], + in->data[plane], in->linesize[plane], + width * ((s->depth + 7) / 8), height); +diff --git a/libavfilter/vf_bm3d.c b/libavfilter/vf_bm3d.c +index 0bb7e41eff..18d13b25ff 100644 +--- a/libavfilter/vf_bm3d.c ++++ b/libavfilter/vf_bm3d.c +@@ -279,7 +279,7 @@ static void do_block_matching_multi(BM3DContext *s, const uint8_t *src, int src_ + int r_y, int r_x, int plane, int jobnr) + { + SliceContext *sc = &s->slices[jobnr]; +- double MSE2SSE = s->group_size * s->block_size * s->block_size * src_range * src_range / (double)(s->max * s->max); ++ double MSE2SSE = s->group_size * s->block_size * s->block_size * src_range * src_range / (s->max * s->max); + double distMul = 1. / MSE2SSE; + double th_sse = th_mse * MSE2SSE; + int i, index = sc->nb_match_blocks; diff --git a/libavfilter/vf_deinterlace_v4l2m2m.c b/libavfilter/vf_deinterlace_v4l2m2m.c new file mode 100644 index 0000000000..d4c11cfc51 @@ -68537,195 +69070,193 @@ index 0000000000..d4c11cfc51 + .activate = deint_v4l2m2m_activate, +}; + -diff --git a/libavfilter/vf_frei0r.c b/libavfilter/vf_frei0r.c -index ed0ba9f866..2ec4707d97 100644 ---- a/libavfilter/vf_frei0r.c -+++ b/libavfilter/vf_frei0r.c -@@ -353,20 +353,14 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) - { - Frei0rContext *s = inlink->dst->priv; - AVFilterLink *outlink = inlink->dst->outputs[0]; -- AVFrame *out = ff_default_get_video_buffer2(outlink, outlink->w, outlink->h, 16); -- if (!out) -- goto fail; -+ AVFrame *out; +diff --git a/libavfilter/vf_deshake_opencl.c b/libavfilter/vf_deshake_opencl.c +index 6a46fff9e2..4f1bb09362 100644 +--- a/libavfilter/vf_deshake_opencl.c ++++ b/libavfilter/vf_deshake_opencl.c +@@ -712,7 +712,7 @@ static int minimize_error( + total_err += deshake_ctx->ransac_err[j]; + } -- av_frame_copy_props(out, in); -- -- if (in->linesize[0] != out->linesize[0]) { -- AVFrame *in2 = ff_default_get_video_buffer2(outlink, outlink->w, outlink->h, 16); -- if (!in2) -- goto fail; -- av_frame_copy(in2, in); -+ out = ff_get_video_buffer(outlink, outlink->w, outlink->h); -+ if (!out) { - av_frame_free(&in); -- in = in2; -+ return AVERROR(ENOMEM); - } -+ av_frame_copy_props(out, in); +- if (i == 0 || total_err < best_err) { ++ if (total_err < best_err) { + for (int mi = 0; mi < 6; ++mi) { + best_model[mi] = model[mi]; + } +diff --git a/libavfilter/vf_gradfun.c b/libavfilter/vf_gradfun.c +index 31c2bf19c3..28da37ff93 100644 +--- a/libavfilter/vf_gradfun.c ++++ b/libavfilter/vf_gradfun.c +@@ -93,7 +93,7 @@ static void filter(GradFunContext *ctx, uint8_t *dst, const uint8_t *src, int wi + for (y = 0; y < r; y++) + ctx->blur_line(dc, buf + y * bstride, buf + (y - 1) * bstride, src + 2 * y * src_linesize, src_linesize, width / 2); + for (;;) { +- if (y + 1 < height - r) { ++ if (y < height - r) { + int mod = ((y + r) / 2) % r; + uint16_t *buf0 = buf + mod * bstride; + uint16_t *buf1 = buf + (mod ? 
mod - 1 : r - 1) * bstride; +diff --git a/libavfilter/vf_lut3d.c b/libavfilter/vf_lut3d.c +index 106e096579..8b1aeb75e7 100644 +--- a/libavfilter/vf_lut3d.c ++++ b/libavfilter/vf_lut3d.c +@@ -757,8 +757,7 @@ try_again: + else if (!strncmp(line + 7, "MAX ", 4)) vals = max; + if (!vals) + return AVERROR_INVALIDDATA; +- if (av_sscanf(line + 11, "%f %f %f", vals, vals + 1, vals + 2) != 3) +- return AVERROR_INVALIDDATA; ++ av_sscanf(line + 11, "%f %f %f", vals, vals + 1, vals + 2); + av_log(ctx, AV_LOG_DEBUG, "min: %f %f %f | max: %f %f %f\n", + min[0], min[1], min[2], max[0], max[1], max[2]); + goto try_again; +@@ -1786,14 +1785,12 @@ try_again: + else if (!strncmp(line + 7, "MAX ", 4)) vals = max; + if (!vals) + return AVERROR_INVALIDDATA; +- if (av_sscanf(line + 11, "%f %f %f", vals, vals + 1, vals + 2) != 3) +- return AVERROR_INVALIDDATA; ++ av_sscanf(line + 11, "%f %f %f", vals, vals + 1, vals + 2); + av_log(ctx, AV_LOG_DEBUG, "min: %f %f %f | max: %f %f %f\n", + min[0], min[1], min[2], max[0], max[1], max[2]); + goto try_again; + } else if (!strncmp(line, "LUT_1D_INPUT_RANGE ", 19)) { +- if (av_sscanf(line + 19, "%f %f", min, max) != 2) +- return AVERROR_INVALIDDATA; ++ av_sscanf(line + 19, "%f %f", min, max); + min[1] = min[2] = min[0]; + max[1] = max[2] = max[0]; + goto try_again; +diff --git a/libavfilter/vf_minterpolate.c b/libavfilter/vf_minterpolate.c +index cc59f02c21..969463f021 100644 +--- a/libavfilter/vf_minterpolate.c ++++ b/libavfilter/vf_minterpolate.c +@@ -1087,13 +1087,8 @@ static void interpolate(AVFilterLink *inlink, AVFrame *avf_out) + pts = av_rescale(avf_out->pts, (int64_t) ALPHA_MAX * outlink->time_base.num * inlink->time_base.den, + (int64_t) outlink->time_base.den * inlink->time_base.num); - s->update(s->instance, in->pts * av_q2d(inlink->time_base) * 1000, - (const uint32_t *)in->data[0], -@@ -375,10 +369,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) - av_frame_free(&in); +- if (mi_ctx->frames[2].avf->pts > mi_ctx->frames[1].avf->pts) { +- alpha = (pts - mi_ctx->frames[1].avf->pts * ALPHA_MAX) / (mi_ctx->frames[2].avf->pts - mi_ctx->frames[1].avf->pts); +- alpha = av_clip(alpha, 0, ALPHA_MAX); +- } else { +- av_log(ctx, AV_LOG_DEBUG, "duplicate input PTS detected\n"); +- alpha = 0; +- } ++ alpha = (pts - mi_ctx->frames[1].avf->pts * ALPHA_MAX) / (mi_ctx->frames[2].avf->pts - mi_ctx->frames[1].avf->pts); ++ alpha = av_clip(alpha, 0, ALPHA_MAX); - return ff_filter_frame(outlink, out); --fail: -- av_frame_free(&in); -- av_frame_free(&out); -- return AVERROR(ENOMEM); - } + if (alpha == 0 || alpha == ALPHA_MAX) { + av_frame_copy(avf_out, alpha ? 
mi_ctx->frames[2].avf : mi_ctx->frames[1].avf); +diff --git a/libavfilter/vf_rotate.c b/libavfilter/vf_rotate.c +index 9add726a6f..65c6cc411a 100644 +--- a/libavfilter/vf_rotate.c ++++ b/libavfilter/vf_rotate.c +@@ -296,9 +296,7 @@ static int config_props(AVFilterLink *outlink) + double res; + char *expr; - static int process_command(AVFilterContext *ctx, const char *cmd, const char *args, -@@ -477,7 +467,7 @@ static int source_config_props(AVFilterLink *outlink) - static int source_request_frame(AVFilterLink *outlink) - { - Frei0rContext *s = outlink->src->priv; -- AVFrame *frame = ff_default_get_video_buffer2(outlink, outlink->w, outlink->h, 16); -+ AVFrame *frame = ff_get_video_buffer(outlink, outlink->w, outlink->h); +- ret = ff_draw_init(&rot->draw, inlink->format, 0); +- if (ret < 0) +- return ret; ++ ff_draw_init(&rot->draw, inlink->format, 0); + ff_draw_color(&rot->draw, &rot->color, rot->fillcolor); - if (!frame) - return AVERROR(ENOMEM); -diff --git a/libavfilter/vf_idet.c b/libavfilter/vf_idet.c -index cc08722b06..02ae2edcb9 100644 ---- a/libavfilter/vf_idet.c -+++ b/libavfilter/vf_idet.c -@@ -336,19 +336,20 @@ static int request_frame(AVFilterLink *link) - static av_cold void uninit(AVFilterContext *ctx) - { - IDETContext *idet = ctx->priv; -+ int level = strncmp(ctx->name, "auto-inserted", 13) ? AV_LOG_INFO : AV_LOG_DEBUG; - -- av_log(ctx, AV_LOG_INFO, "Repeated Fields: Neither:%6"PRId64" Top:%6"PRId64" Bottom:%6"PRId64"\n", -+ av_log(ctx, level, "Repeated Fields: Neither:%6"PRId64" Top:%6"PRId64" Bottom:%6"PRId64"\n", - idet->total_repeats[REPEAT_NONE], - idet->total_repeats[REPEAT_TOP], - idet->total_repeats[REPEAT_BOTTOM] - ); -- av_log(ctx, AV_LOG_INFO, "Single frame detection: TFF:%6"PRId64" BFF:%6"PRId64" Progressive:%6"PRId64" Undetermined:%6"PRId64"\n", -+ av_log(ctx, level, "Single frame detection: TFF:%6"PRId64" BFF:%6"PRId64" Progressive:%6"PRId64" Undetermined:%6"PRId64"\n", - idet->total_prestat[TFF], - idet->total_prestat[BFF], - idet->total_prestat[PROGRESSIVE], - idet->total_prestat[UNDETERMINED] - ); -- av_log(ctx, AV_LOG_INFO, "Multi frame detection: TFF:%6"PRId64" BFF:%6"PRId64" Progressive:%6"PRId64" Undetermined:%6"PRId64"\n", -+ av_log(ctx, level, "Multi frame detection: TFF:%6"PRId64" BFF:%6"PRId64" Progressive:%6"PRId64" Undetermined:%6"PRId64"\n", - idet->total_poststat[TFF], - idet->total_poststat[BFF], - idet->total_poststat[PROGRESSIVE], + rot->hsub = pixdesc->log2_chroma_w; diff --git a/libavfilter/vf_scale.c b/libavfilter/vf_scale.c -index 788e4bab5a..3ca6ba2368 100644 +index ba50fbf492..788e4bab5a 100644 --- a/libavfilter/vf_scale.c +++ b/libavfilter/vf_scale.c -@@ -493,19 +493,19 @@ static int config_props(AVFilterLink *outlink) - if ((ret = scale_eval_dimensions(ctx)) < 0) - goto fail; +@@ -496,13 +496,10 @@ static int config_props(AVFilterLink *outlink) + outlink->w = scale->w; + outlink->h = scale->h; -- outlink->w = scale->w; -- outlink->h = scale->h; -- -- ff_scale_adjust_dimensions(inlink, &outlink->w, &outlink->h, -+ ff_scale_adjust_dimensions(inlink, &scale->w, &scale->h, +- ret = ff_scale_adjust_dimensions(inlink, &outlink->w, &outlink->h, ++ ff_scale_adjust_dimensions(inlink, &outlink->w, &outlink->h, scale->force_original_aspect_ratio, scale->force_divisible_by); -- if (outlink->w > INT_MAX || -- outlink->h > INT_MAX || -- (outlink->h * inlink->w) > INT_MAX || -- (outlink->w * inlink->h) > INT_MAX) -+ if (scale->w > INT_MAX || -+ scale->h > INT_MAX || -+ (scale->h * inlink->w) > INT_MAX || -+ (scale->w * inlink->h) > INT_MAX) - 
av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n"); - -+ outlink->w = scale->w; -+ outlink->h = scale->h; -+ - /* TODO: make algorithm configurable */ - - scale->input_is_pal = desc->flags & AV_PIX_FMT_FLAG_PAL; -@@ -684,9 +684,9 @@ static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out) - goto scale; - - if (scale->eval_mode == EVAL_MODE_INIT) { -- snprintf(buf, sizeof(buf) - 1, "%d", scale->w); -+ snprintf(buf, sizeof(buf)-1, "%d", outlink->w); - av_opt_set(scale, "w", buf, 0); -- snprintf(buf, sizeof(buf) - 1, "%d", scale->h); -+ snprintf(buf, sizeof(buf)-1, "%d", outlink->h); - av_opt_set(scale, "h", buf, 0); - - ret = scale_parse_expr(ctx, NULL, &scale->w_pexpr, "width", scale->w_expr); -diff --git a/libavfilter/vf_showinfo.c b/libavfilter/vf_showinfo.c -index 0b67cd7205..6208892005 100644 ---- a/libavfilter/vf_showinfo.c -+++ b/libavfilter/vf_showinfo.c -@@ -454,15 +454,12 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) - av_log(ctx, AV_LOG_INFO, " %08"PRIX32, plane_checksum[plane]); - av_log(ctx, AV_LOG_INFO, "] mean:["); - for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) -- av_log(ctx, AV_LOG_INFO, "%s%"PRId64, -- plane ? " ":"", -- (sum[plane] + pixelcount[plane]/2) / pixelcount[plane]); -- av_log(ctx, AV_LOG_INFO, "] stdev:["); -+ av_log(ctx, AV_LOG_INFO, "%"PRId64" ", (sum[plane] + pixelcount[plane]/2) / pixelcount[plane]); -+ av_log(ctx, AV_LOG_INFO, "\b] stdev:["); - for (plane = 0; plane < 4 && frame->data[plane] && frame->linesize[plane]; plane++) -- av_log(ctx, AV_LOG_INFO, "%s%3.1f", -- plane ? " ":"", -+ av_log(ctx, AV_LOG_INFO, "%3.1f ", - sqrt((sum2[plane] - sum[plane]*(double)sum[plane]/pixelcount[plane])/pixelcount[plane])); -- av_log(ctx, AV_LOG_INFO, "]"); -+ av_log(ctx, AV_LOG_INFO, "\b]"); - } - av_log(ctx, AV_LOG_INFO, "\n"); - +- if (ret < 0) +- goto fail; +- + if (outlink->w > INT_MAX || + outlink->h > INT_MAX || + (outlink->h * inlink->w) > INT_MAX || diff --git a/libavfilter/vf_signature.c b/libavfilter/vf_signature.c -index 1205168f8f..32a6405e14 100644 +index fc0ca92d37..1205168f8f 100644 --- a/libavfilter/vf_signature.c +++ b/libavfilter/vf_signature.c -@@ -224,7 +224,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref) - dw1 = inlink->w / 32; - if (inlink->w % 32) - dw2 = dw1 + 1; -- denom = (sc->divide) ? dh1 * (int64_t)dh2 * dw1 * dw2 : 1; -+ denom = (sc->divide) ? dh1 * dh2 * dw1 * dw2 : 1; +@@ -391,9 +391,6 @@ static int xml_export(AVFilterContext *ctx, StreamContext *sc, const char* filen + FILE* f; + unsigned int pot3[5] = { 3*3*3*3, 3*3*3, 3*3, 3, 1 }; - for (i = 0; i < 32; i++) { - rowcount = 0; -@@ -250,7 +250,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref) - } +- if (!sc->coarseend->last) +- return AVERROR(EINVAL); // No frames ? 
+- + f = fopen(filename, "w"); + if (!f) { + int err = AVERROR(EINVAL); +diff --git a/libavfilter/vf_swaprect.c b/libavfilter/vf_swaprect.c +index 9a96b0b9cb..66bed161f4 100644 +--- a/libavfilter/vf_swaprect.c ++++ b/libavfilter/vf_swaprect.c +@@ -18,7 +18,6 @@ + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +-#include "libavutil/avassert.h" + #include "libavutil/avstring.h" + #include "libavutil/eval.h" + #include "libavutil/imgutils.h" +@@ -147,10 +146,10 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) + w = dw; h = dh; x1[0] = dx1; y1[0] = dy1; x2[0] = dx2; y2[0] = dy2; + + x1[0] = av_clip(x1[0], 0, inlink->w - 1); +- y1[0] = av_clip(y1[0], 0, inlink->h - 1); ++ y1[0] = av_clip(y1[0], 0, inlink->w - 1); + + x2[0] = av_clip(x2[0], 0, inlink->w - 1); +- y2[0] = av_clip(y2[0], 0, inlink->h - 1); ++ y2[0] = av_clip(y2[0], 0, inlink->w - 1); + + ah[1] = ah[2] = AV_CEIL_RSHIFT(h, s->desc->log2_chroma_h); + ah[0] = ah[3] = h; +@@ -170,20 +169,16 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) + lw[1] = lw[2] = AV_CEIL_RSHIFT(inlink->w, s->desc->log2_chroma_w); + lw[0] = lw[3] = inlink->w; + +- x1[1] = x1[2] = (x1[0] >> s->desc->log2_chroma_w); ++ x1[1] = x1[2] = AV_CEIL_RSHIFT(x1[0], s->desc->log2_chroma_w); + x1[0] = x1[3] = x1[0]; +- y1[1] = y1[2] = (y1[0] >> s->desc->log2_chroma_h); ++ y1[1] = y1[2] = AV_CEIL_RSHIFT(y1[0], s->desc->log2_chroma_h); + y1[0] = y1[3] = y1[0]; + +- x2[1] = x2[2] = (x2[0] >> s->desc->log2_chroma_w); ++ x2[1] = x2[2] = AV_CEIL_RSHIFT(x2[0], s->desc->log2_chroma_w); + x2[0] = x2[3] = x2[0]; +- y2[1] = y2[2] = (y2[0] >> s->desc->log2_chroma_h); ++ y2[1] = y2[2] = AV_CEIL_RSHIFT(y2[0], s->desc->log2_chroma_h); + y2[0] = y2[3] = y2[0]; + +- +- av_assert0(FFMAX(x1[1], x2[1]) + pw[1] <= lw[1]); +- av_assert0(FFMAX(y1[1], y2[1]) + ph[1] <= lh[1]); +- + for (p = 0; p < s->nb_planes; p++) { + if (ph[p] == ah[p] && pw[p] == aw[p]) { + uint8_t *src = in->data[p] + y1[p] * in->linesize[p] + x1[p] * s->pixsteps[p]; +diff --git a/libavfilter/vf_thumbnail_cuda.c b/libavfilter/vf_thumbnail_cuda.c +index de61afd1f5..0c06815643 100644 +--- a/libavfilter/vf_thumbnail_cuda.c ++++ b/libavfilter/vf_thumbnail_cuda.c +@@ -288,7 +288,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame) + hist[i] = 4 * hist[i]; } -- denom = (sc->divide) ? 1 : dh1 * (int64_t)dh2 * dw1 * dw2; -+ denom = (sc->divide) ? 1 : dh1 * dh2 * dw1 * dw2; - - for (i = 0; i < ELEMENT_COUNT; i++) { - const ElemCat* elemcat = elements[i]; -diff --git a/libavfilter/vf_subtitles.c b/libavfilter/vf_subtitles.c -index b57dd80b13..de74afa2b7 100644 ---- a/libavfilter/vf_subtitles.c -+++ b/libavfilter/vf_subtitles.c -@@ -145,16 +145,9 @@ static int config_input(AVFilterLink *inlink) - ff_draw_init(&ass->draw, inlink->format, ass->alpha ? 
FF_DRAW_PROCESS_ALPHA : 0); - - ass_set_frame_size (ass->renderer, inlink->w, inlink->h); -- if (ass->original_w && ass->original_h) { -+ if (ass->original_w && ass->original_h) - ass_set_aspect_ratio(ass->renderer, (double)inlink->w / inlink->h, - (double)ass->original_w / ass->original_h); --#if LIBASS_VERSION > 0x01010000 -- ass_set_storage_size(ass->renderer, ass->original_w, ass->original_h); -- } else { -- ass_set_storage_size(ass->renderer, inlink->w, inlink->h); --#endif -- } -- - if (ass->shaping != -1) - ass_set_shaper(ass->renderer, ass->shaping); +- ret = CHECK_CU(cu->cuCtxPopCurrent(&dummy)); ++ CHECK_CU(cu->cuCtxPopCurrent(&dummy)); + if (ret < 0) + return ret; diff --git a/libavfilter/vf_unsand.c b/libavfilter/vf_unsand.c new file mode 100644 @@ -68977,812 +69508,428 @@ index df805141e0..9a2eb24901 100644 } } } -diff --git a/libavfilter/vf_w3fdif.c b/libavfilter/vf_w3fdif.c -index d380fdd4de..1a64b2b953 100644 ---- a/libavfilter/vf_w3fdif.c -+++ b/libavfilter/vf_w3fdif.c -@@ -283,7 +283,7 @@ static int config_input(AVFilterLink *inlink) - AVFilterContext *ctx = inlink->dst; - W3FDIFContext *s = ctx->priv; - const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format); -- int ret, i, depth, nb_threads; -+ int ret, i, depth; - - if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0) - return ret; -@@ -297,11 +297,10 @@ static int config_input(AVFilterLink *inlink) +diff --git a/libavfilter/vf_vidstabdetect.c b/libavfilter/vf_vidstabdetect.c +index 7b4ba3df17..fd7ff3be24 100644 +--- a/libavfilter/vf_vidstabdetect.c ++++ b/libavfilter/vf_vidstabdetect.c +@@ -176,7 +176,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in) } + if (vsMotionDetection(md, &localmotions, &frame) != VS_OK) { + av_log(ctx, AV_LOG_ERROR, "motion detection failed"); +- return AVERROR_EXTERNAL; ++ return AVERROR(AVERROR_EXTERNAL); + } else { + if (vsWriteToFile(md, s->f, &localmotions) != VS_OK) { + int ret = AVERROR(errno); +diff --git a/libavfilter/vf_weave.c b/libavfilter/vf_weave.c +index a45d650f67..6139844b20 100644 +--- a/libavfilter/vf_weave.c ++++ b/libavfilter/vf_weave.c +@@ -30,7 +30,6 @@ typedef struct WeaveContext { + int double_weave; + int nb_planes; + int planeheight[4]; +- int outheight[4]; + int linesize[4]; + AVFrame *prev; +@@ -86,9 +85,6 @@ static int config_props_output(AVFilterLink *outlink) + s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h); + s->planeheight[0] = s->planeheight[3] = inlink->h; + +- s->outheight[1] = s->outheight[2] = AV_CEIL_RSHIFT(2*inlink->h, desc->log2_chroma_h); +- s->outheight[0] = s->outheight[3] = 2*inlink->h; +- s->nb_planes = av_pix_fmt_count_planes(inlink->format); -- nb_threads = ff_filter_get_nb_threads(ctx); -- s->work_line = av_calloc(nb_threads, sizeof(*s->work_line)); -+ s->nb_threads = ff_filter_get_nb_threads(ctx); -+ s->work_line = av_calloc(s->nb_threads, sizeof(*s->work_line)); - if (!s->work_line) - return AVERROR(ENOMEM); -- s->nb_threads = nb_threads; - for (i = 0; i < s->nb_threads; i++) { - s->work_line[i] = av_calloc(FFALIGN(s->linesize[0], 32), sizeof(*s->work_line[0])); -diff --git a/libavfilter/video.c b/libavfilter/video.c -index b049804419..7a8e587798 100644 ---- a/libavfilter/video.c -+++ b/libavfilter/video.c -@@ -41,7 +41,7 @@ AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h) - return ff_get_video_buffer(link->dst->outputs[0], w, h); - } - --AVFrame *ff_default_get_video_buffer2(AVFilterLink *link, int w, int h, int align) -+AVFrame 
*ff_default_get_video_buffer(AVFilterLink *link, int w, int h) - { - AVFrame *frame = NULL; - int pool_width = 0; -@@ -96,11 +96,6 @@ AVFrame *ff_default_get_video_buffer2(AVFilterLink *link, int w, int h, int alig - return frame; - } - --AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h) --{ -- return ff_default_get_video_buffer2(link, w, h, av_cpu_max_align()); --} -- - AVFrame *ff_get_video_buffer(AVFilterLink *link, int w, int h) - { - AVFrame *ret = NULL; -diff --git a/libavfilter/video.h b/libavfilter/video.h -index f9174a4a0b..56c58d6766 100644 ---- a/libavfilter/video.h -+++ b/libavfilter/video.h -@@ -24,7 +24,6 @@ - #include "avfilter.h" - - AVFrame *ff_default_get_video_buffer(AVFilterLink *link, int w, int h); --AVFrame *ff_default_get_video_buffer2(AVFilterLink *link, int w, int h, int align); - AVFrame *ff_null_get_video_buffer(AVFilterLink *link, int w, int h); - - /** -diff --git a/libavfilter/vsrc_mandelbrot.c b/libavfilter/vsrc_mandelbrot.c -index ed31a23c31..761c915103 100644 ---- a/libavfilter/vsrc_mandelbrot.c -+++ b/libavfilter/vsrc_mandelbrot.c -@@ -134,9 +134,6 @@ static av_cold int init(AVFilterContext *ctx) - s-> next_cache= av_malloc_array(s->cache_allocated, sizeof(*s-> next_cache)); - s-> zyklus = av_malloc_array(s->maxiter + 16, sizeof(*s->zyklus)); - -- if (!s->point_cache || !s->next_cache || !s->zyklus) -- return AVERROR(ENOMEM); -- return 0; - } +@@ -114,20 +110,19 @@ static int weave_slice(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) + const int height = s->planeheight[i]; + const int start = (height * jobnr) / nb_jobs; + const int end = (height * (jobnr+1)) / nb_jobs; +- const int compensation = 2*end > s->outheight[i]; -diff --git a/libavformat/4xm.c b/libavformat/4xm.c -index cfee8a02f4..30f1b05324 100644 ---- a/libavformat/4xm.c -+++ b/libavformat/4xm.c -@@ -137,8 +137,7 @@ static int parse_strk(AVFormatContext *s, - return AVERROR_INVALIDDATA; - - track = AV_RL32(buf + 8); -- if ((unsigned)track >= UINT_MAX / sizeof(AudioTrack) - 1 || -- track >= s->max_streams) { -+ if ((unsigned)track >= UINT_MAX / sizeof(AudioTrack) - 1) { - av_log(s, AV_LOG_ERROR, "current_track too large\n"); - return AVERROR_INVALIDDATA; - } -@@ -149,9 +148,6 @@ static int parse_strk(AVFormatContext *s, - memset(&fourxm->tracks[fourxm->track_count], 0, - sizeof(AudioTrack) * (track + 1 - fourxm->track_count)); - fourxm->track_count = track + 1; -- } else { -- if (fourxm->tracks[track].bits) -- return AVERROR_INVALIDDATA; - } - fourxm->tracks[track].adpcm = AV_RL32(buf + 12); - fourxm->tracks[track].channels = AV_RL32(buf + 36); -diff --git a/libavformat/aadec.c b/libavformat/aadec.c -index 90796c9599..2575e98153 100644 ---- a/libavformat/aadec.c -+++ b/libavformat/aadec.c -@@ -130,8 +130,8 @@ static int aa_read_header(AVFormatContext *s) - AV_WB32(&header_key[idx * 4], header_key_part[idx]); // convert each part to BE! 
- } - av_log(s, AV_LOG_DEBUG, "Processed HeaderKey is "); -- for (int j = 0; j < 16; j++) -- av_log(s, AV_LOG_DEBUG, "%02x", header_key[j]); -+ for (i = 0; i < 16; i++) -+ av_log(s, AV_LOG_DEBUG, "%02x", header_key[i]); - av_log(s, AV_LOG_DEBUG, "\n"); - } else { - av_dict_set(&s->metadata, key, val, 0); -diff --git a/libavformat/aaxdec.c b/libavformat/aaxdec.c -index ad893efadd..e69e5615ee 100644 ---- a/libavformat/aaxdec.c -+++ b/libavformat/aaxdec.c -@@ -262,8 +262,6 @@ static int aax_read_header(AVFormatContext *s) - - start = avio_rb32(pb); - size = avio_rb32(pb); -- if (!size) -- return AVERROR_INVALIDDATA; - a->segments[r].start = start + a->data_offset; - a->segments[r].end = a->segments[r].start + size; - } else { -diff --git a/libavformat/act.c b/libavformat/act.c -index f6edfb44ab..26425ca1bb 100644 ---- a/libavformat/act.c -+++ b/libavformat/act.c -@@ -66,7 +66,6 @@ static int read_header(AVFormatContext *s) - AVIOContext *pb = s->pb; - int size; - AVStream* st; -- int ret; - - int min,sec,msec; - -@@ -76,9 +75,7 @@ static int read_header(AVFormatContext *s) - - avio_skip(pb, 16); - size=avio_rl32(pb); -- ret = ff_get_wav_header(s, pb, st->codecpar, size, 0); -- if (ret < 0) -- return ret; -+ ff_get_wav_header(s, pb, st->codecpar, size, 0); - - /* - 8000Hz (Fine-rec) file format has 10 bytes long -diff --git a/libavformat/aiffdec.c b/libavformat/aiffdec.c -index f14044d61c..8b85fea809 100644 ---- a/libavformat/aiffdec.c -+++ b/libavformat/aiffdec.c -@@ -53,9 +53,9 @@ static enum AVCodecID aiff_codec_get_id(int bps) - } - - /* returns the size of the found tag */ --static int64_t get_tag(AVIOContext *pb, uint32_t * tag) -+static int get_tag(AVIOContext *pb, uint32_t * tag) - { -- int64_t size; -+ int size; - - if (avio_feof(pb)) - return AVERROR(EIO); -@@ -63,16 +63,16 @@ static int64_t get_tag(AVIOContext *pb, uint32_t * tag) - *tag = avio_rl32(pb); - size = avio_rb32(pb); - -+ if (size < 0) -+ size = 0x7fffffff; -+ - return size; - } - - /* Metadata string read */ --static void get_meta(AVFormatContext *s, const char *key, int64_t size) -+static void get_meta(AVFormatContext *s, const char *key, int size) - { -- uint8_t *str = NULL; -- -- if (size < SIZE_MAX) -- str = av_malloc(size+1); -+ uint8_t *str = av_malloc(size+1); - - if (str) { - int res = avio_read(s->pb, str, size); -@@ -89,7 +89,7 @@ static void get_meta(AVFormatContext *s, const char *key, int64_t size) - } - - /* Returns the number of sound data frames or negative on error */ --static int get_aiff_header(AVFormatContext *s, int64_t size, -+static int get_aiff_header(AVFormatContext *s, int size, - unsigned version) - { - AVIOContext *pb = s->pb; -@@ -100,6 +100,9 @@ static int get_aiff_header(AVFormatContext *s, int64_t size, - int sample_rate; - unsigned int num_frames; - -+ if (size == INT_MAX) -+ return AVERROR_INVALIDDATA; -+ - if (size & 1) - size++; - par->codec_type = AVMEDIA_TYPE_AUDIO; -@@ -117,9 +120,6 @@ static int get_aiff_header(AVFormatContext *s, int64_t size, - sample_rate = val << exp; - else - sample_rate = (val + (1ULL<<(-exp-1))) >> -exp; -- if (sample_rate <= 0) -- return AVERROR_INVALIDDATA; -- - par->sample_rate = sample_rate; - if (size < 18) - return AVERROR_INVALIDDATA; -@@ -182,10 +182,8 @@ static int get_aiff_header(AVFormatContext *s, int64_t size, - par->block_align = (av_get_bits_per_sample(par->codec_id) * par->channels) >> 3; - - if (aiff->block_duration) { -- par->bit_rate = av_rescale(par->sample_rate, par->block_align * 8LL, -- aiff->block_duration); -- if (par->bit_rate < 0) 
-- par->bit_rate = 0; -+ par->bit_rate = (int64_t)par->sample_rate * (par->block_align << 3) / -+ aiff->block_duration; + av_image_copy_plane(out->data[i] + out->linesize[i] * field1 + + out->linesize[i] * start * 2, + out->linesize[i] * 2, + in->data[i] + start * in->linesize[i], + in->linesize[i], +- s->linesize[i], end - start - compensation * field1); ++ s->linesize[i], end - start); + av_image_copy_plane(out->data[i] + out->linesize[i] * field2 + + out->linesize[i] * start * 2, + out->linesize[i] * 2, + s->prev->data[i] + start * s->prev->linesize[i], + s->prev->linesize[i], +- s->linesize[i], end - start - compensation * field2); ++ s->linesize[i], end - start); } - /* Chunk is over */ -@@ -210,8 +208,7 @@ static int aiff_probe(const AVProbeData *p) - /* aiff input */ - static int aiff_read_header(AVFormatContext *s) - { -- int ret; -- int64_t filesize, size; -+ int ret, size, filesize; - int64_t offset = 0, position; - uint32_t tag; - unsigned version = AIFF_C_VERSION1; -@@ -222,7 +219,7 @@ static int aiff_read_header(AVFormatContext *s) - - /* check FORM header */ - filesize = get_tag(pb, &tag); -- if (filesize < 4 || tag != MKTAG('F', 'O', 'R', 'M')) -+ if (filesize < 0 || tag != MKTAG('F', 'O', 'R', 'M')) - return AVERROR_INVALIDDATA; - - /* AIFF data type */ -@@ -249,7 +246,10 @@ static int aiff_read_header(AVFormatContext *s) - if (size < 0) - return size; - -- filesize -= size + 8; -+ if (size >= 0x7fffffff - 8) -+ filesize = 0; -+ else -+ filesize -= size + 8; - - switch (tag) { - case MKTAG('C', 'O', 'M', 'M'): /* Common chunk */ -@@ -365,12 +365,10 @@ got_sound: - if (!st->codecpar->block_align && st->codecpar->codec_id == AV_CODEC_ID_QCELP) { - av_log(s, AV_LOG_WARNING, "qcelp without wave chunk, assuming full rate\n"); - st->codecpar->block_align = 35; -- } else if (st->codecpar->block_align <= 0) { -+ } else if (!st->codecpar->block_align) { - av_log(s, AV_LOG_ERROR, "could not find COMM tag or invalid block_align value\n"); - return -1; - } -- if (aiff->block_duration < 0) -- return AVERROR_INVALIDDATA; - - /* Now positioned, get the sound data start and end */ - avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate); -@@ -425,7 +423,7 @@ static int aiff_read_packet(AVFormatContext *s, - pkt->flags &= ~AV_PKT_FLAG_CORRUPT; - /* Only one stream in an AIFF file */ - pkt->stream_index = 0; -- pkt->duration = (res / st->codecpar->block_align) * (int64_t) aiff->block_duration; -+ pkt->duration = (res / st->codecpar->block_align) * aiff->block_duration; return 0; - } - diff --git a/libavformat/ape.c b/libavformat/ape.c -index 7ced92cf76..2698c770ee 100644 +index 8a876d7fd9..7ced92cf76 100644 --- a/libavformat/ape.c +++ b/libavformat/ape.c -@@ -42,8 +42,8 @@ - - typedef struct APEFrame { - int64_t pos; -- int64_t size; - int nblocks; -+ int size; - int skip; - int64_t pts; - } APEFrame; -@@ -130,7 +130,7 @@ static void ape_dumpinfo(AVFormatContext * s, APEContext * ape_ctx) - - av_log(s, AV_LOG_DEBUG, "\nFrames\n\n"); - for (i = 0; i < ape_ctx->totalframes; i++) -- av_log(s, AV_LOG_DEBUG, "%8d %8"PRId64" %8"PRId64" (%d samples)\n", i, -+ av_log(s, AV_LOG_DEBUG, "%8d %8"PRId64" %8d (%d samples)\n", i, - ape_ctx->frames[i].pos, ape_ctx->frames[i].size, - ape_ctx->frames[i].nblocks); - -@@ -148,8 +148,7 @@ static int ape_read_header(AVFormatContext * s) - AVStream *st; - uint32_t tag; - int i, ret; -- int total_blocks; -- int64_t final_size = 0; -+ int total_blocks, final_size = 0; - int64_t pts, file_size; - - /* Skip any leading junk such as id3v2 tags */ -@@ -301,8 +300,6 
@@ static int ape_read_header(AVFormatContext * s) - ape->frames[i].pos -= ape->frames[i].skip; - ape->frames[i].size += ape->frames[i].skip; - } -- if (ape->frames[i].size > INT_MAX - 3) -- return AVERROR_INVALIDDATA; - ape->frames[i].size = (ape->frames[i].size + 3) & ~3; +@@ -293,7 +293,7 @@ static int ape_read_header(AVFormatContext * s) + final_size -= final_size & 3; } - if (ape->fileversion < 3810) { -@@ -400,7 +397,7 @@ static int ape_read_packet(AVFormatContext * s, AVPacket * pkt) + if (file_size <= 0 || final_size <= 0) +- final_size = ape->finalframeblocks * 8LL; ++ final_size = ape->finalframeblocks * 8; + ape->frames[ape->totalframes - 1].size = final_size; - if (ape->frames[ape->currentframe].size <= 0 || - ape->frames[ape->currentframe].size > INT_MAX - extra_size) { -- av_log(s, AV_LOG_ERROR, "invalid packet size: %8"PRId64"\n", -+ av_log(s, AV_LOG_ERROR, "invalid packet size: %d\n", - ape->frames[ape->currentframe].size); - ape->currentframe++; - return AVERROR(EIO); -diff --git a/libavformat/aqtitledec.c b/libavformat/aqtitledec.c -index 960a5d8ef5..81630d73b0 100644 ---- a/libavformat/aqtitledec.c -+++ b/libavformat/aqtitledec.c -@@ -74,8 +74,7 @@ static int aqt_read_header(AVFormatContext *s) - new_event = 1; - pos = avio_tell(s->pb); - if (sub) { -- if (frame >= sub->pts && (uint64_t)frame - sub->pts < INT64_MAX) -- sub->duration = frame - sub->pts; -+ sub->duration = frame - sub->pts; - sub = NULL; - } - } else if (*line) { + for (i = 0; i < ape->totalframes; i++) { diff --git a/libavformat/argo_asf.c b/libavformat/argo_asf.c -index 06d62442b3..8e2bf21c71 100644 +index e5da23a6bc..06d62442b3 100644 --- a/libavformat/argo_asf.c +++ b/libavformat/argo_asf.c -@@ -422,7 +422,7 @@ static int argo_asf_write_trailer(AVFormatContext *s) - ArgoASFMuxContext *ctx = s->priv_data; - int64_t ret; +@@ -257,7 +257,7 @@ static int argo_asf_seek(AVFormatContext *s, int stream_index, + return -1; -- if ((ret = avio_seek(s->pb, ASF_FILE_HEADER_SIZE, SEEK_SET)) < 0) -+ if ((ret = avio_seek(s->pb, ASF_FILE_HEADER_SIZE, SEEK_SET) < 0)) - return ret; + offset = asf->fhdr.chunk_offset + ASF_CHUNK_HEADER_SIZE + +- block * (int64_t)st->codecpar->block_align; ++ (block * st->codecpar->block_align); - avio_wl32(s->pb, (uint32_t)ctx->nb_blocks); + if ((offset = avio_seek(s->pb, offset, SEEK_SET)) < 0) + return offset; diff --git a/libavformat/asfdec_f.c b/libavformat/asfdec_f.c -index add0d33540..c0265af20d 100644 +index f0b1639b21..add0d33540 100644 --- a/libavformat/asfdec_f.c +++ b/libavformat/asfdec_f.c -@@ -104,7 +104,7 @@ typedef struct ASFContext { - int ts_is_pts; - int packet_multi_size; - int packet_time_delta; -- int64_t packet_time_start; -+ int packet_time_start; - int64_t packet_pos; +@@ -774,7 +774,7 @@ static int asf_read_marker(AVFormatContext *s, int64_t size) - int stream_index; -@@ -1321,12 +1321,10 @@ static int asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pkt) - if ((ret = av_new_packet(&asf_st->pkt, asf_st->packet_obj_size)) < 0) - return ret; - asf_st->seq = asf->packet_seq; -- if (asf->packet_frag_timestamp != AV_NOPTS_VALUE) { -- if (asf->ts_is_pts) { -- asf_st->pkt.pts = asf->packet_frag_timestamp - asf->hdr.preroll; -- } else -- asf_st->pkt.dts = asf->packet_frag_timestamp - asf->hdr.preroll; -- } -+ if (asf->ts_is_pts) { -+ asf_st->pkt.pts = asf->packet_frag_timestamp - asf->hdr.preroll; -+ } else -+ asf_st->pkt.dts = asf->packet_frag_timestamp - asf->hdr.preroll; - asf_st->pkt.stream_index = asf->stream_index; - asf_st->pkt.pos = 
asf_st->packet_pos = asf->packet_pos; - asf_st->pkt_clean = 0; + avio_rl64(pb); // offset, 8 bytes + pres_time = avio_rl64(pb); // presentation time +- pres_time = av_sat_sub64(pres_time, asf->hdr.preroll * 10000LL); ++ pres_time -= asf->hdr.preroll * 10000; + avio_rl16(pb); // entry length + avio_rl32(pb); // send time + avio_rl32(pb); // flags diff --git a/libavformat/asfdec_o.c b/libavformat/asfdec_o.c -index 3a9e590a5b..f98ffc76fa 100644 +index defb3fe892..3a9e590a5b 100644 --- a/libavformat/asfdec_o.c +++ b/libavformat/asfdec_o.c -@@ -113,7 +113,6 @@ typedef struct ASFContext { - int64_t data_offset; - int64_t first_packet_offset; // packet offset - int64_t unknown_offset; // for top level header objects or subobjects without specified behavior -- int in_asf_read_unknown; +@@ -976,9 +976,6 @@ static int asf_read_simple_index(AVFormatContext *s, const GUIDParseTable *g) + int64_t offset; + uint64_t size = avio_rl64(pb); - // ASF file must not contain more than 128 streams according to the specification - ASFStream *asf_st[ASF_MAX_STREAMS]; -@@ -178,7 +177,7 @@ static int asf_read_unknown(AVFormatContext *s, const GUIDParseTable *g) - uint64_t size = avio_rl64(pb); - int ret; - -- if (size > INT64_MAX || asf->in_asf_read_unknown > 5) -+ if (size > INT64_MAX) - return AVERROR_INVALIDDATA; - - if (asf->is_header) -@@ -187,11 +186,8 @@ static int asf_read_unknown(AVFormatContext *s, const GUIDParseTable *g) - if (!g->is_subobject) { - if (!(ret = strcmp(g->name, "Header Extension"))) - avio_skip(pb, 22); // skip reserved fields and Data Size -- asf->in_asf_read_unknown ++; -- ret = detect_unknown_subobject(s, asf->unknown_offset, -- asf->unknown_size); -- asf->in_asf_read_unknown --; -- if (ret < 0) -+ if ((ret = detect_unknown_subobject(s, asf->unknown_offset, -+ asf->unknown_size)) < 0) - return ret; - } else { - if (size < 24) { -@@ -1354,8 +1350,6 @@ static int asf_read_packet_header(AVFormatContext *s) - unsigned char error_flags, len_flags, pay_flags; - - asf->packet_offset = avio_tell(pb); -- if (asf->packet_offset > INT64_MAX/2) -- asf->packet_offset = 0; - error_flags = avio_r8(pb); // read Error Correction Flags - if (error_flags & ASF_PACKET_FLAG_ERROR_CORRECTION_PRESENT) { - if (!(error_flags & ASF_ERROR_CORRECTION_LENGTH_TYPE)) { -diff --git a/libavformat/avidec.c b/libavformat/avidec.c -index 75b05ab5d5..542161e360 100644 ---- a/libavformat/avidec.c -+++ b/libavformat/avidec.c -@@ -79,8 +79,6 @@ typedef struct AVIContext { - int stream_index; - DVDemuxContext *dv_demux; - int odml_depth; -- int64_t odml_read; -- int64_t odml_max_pos; - int use_odml; - #define MAX_ODML_DEPTH 1000 - int64_t dts_max; -@@ -200,7 +198,7 @@ static int read_odml_index(AVFormatContext *s, int64_t frame_num) - st = s->streams[stream_id]; - ast = st->priv_data; - -- if (index_sub_type || entries_in_use < 0) -+ if (index_sub_type) - return AVERROR_INVALIDDATA; - - avio_rl32(pb); -@@ -221,18 +219,11 @@ static int read_odml_index(AVFormatContext *s, int64_t frame_num) - } - - for (i = 0; i < entries_in_use; i++) { -- avi->odml_max_pos = FFMAX(avi->odml_max_pos, avio_tell(pb)); -- -- // If we read more than there are bytes then we must have been reading something twice -- if (avi->odml_read > avi->odml_max_pos) -- return AVERROR_INVALIDDATA; -- - if (index_type) { - int64_t pos = avio_rl32(pb) + base - 8; - int len = avio_rl32(pb); - int key = len >= 0; - len &= 0x7FFFFFFF; -- avi->odml_read += 8; - - av_log(s, AV_LOG_TRACE, "pos:%"PRId64", len:%X\n", pos, len); - -@@ -250,9 +241,6 @@ static int 
read_odml_index(AVFormatContext *s, int64_t frame_num) - } else { - int64_t offset, pos; - int duration; -- int ret; -- avi->odml_read += 16; -- - offset = avio_rl64(pb); - avio_rl32(pb); /* size */ - duration = avio_rl32(pb); -@@ -270,7 +258,7 @@ static int read_odml_index(AVFormatContext *s, int64_t frame_num) - if (avio_seek(pb, offset + 8, SEEK_SET) < 0) - return -1; - avi->odml_depth++; -- ret = read_odml_index(s, frame_num); -+ read_odml_index(s, frame_num); - avi->odml_depth--; - frame_num += duration; - -@@ -278,8 +266,7 @@ static int read_odml_index(AVFormatContext *s, int64_t frame_num) - av_log(s, AV_LOG_ERROR, "Failed to restore position after reading index\n"); - return -1; - } -- if (ret < 0) -- return ret; -+ - } - } - avi->index_loaded = 2; -@@ -869,8 +856,6 @@ static int avi_read_header(AVFormatContext *s) - memcpy(st->codecpar->extradata + st->codecpar->extradata_size - 9, - "BottomUp", 9); - } -- if (st->codecpar->height == INT_MIN) -- return AVERROR_INVALIDDATA; - st->codecpar->height = FFABS(st->codecpar->height); - - // avio_skip(pb, size - 5 * 4); -diff --git a/libavformat/aviobuf.c b/libavformat/aviobuf.c -index 1fb30644ff..518cb11129 100644 ---- a/libavformat/aviobuf.c -+++ b/libavformat/aviobuf.c -@@ -1005,9 +1005,6 @@ int ffio_ensure_seekback(AVIOContext *s, int64_t buf_size) - if (buf_size <= s->buf_end - s->buf_ptr) - return 0; - -- if (buf_size > INT_MAX - max_buffer_size) -- return AVERROR(EINVAL); -- - buf_size += max_buffer_size - 1; - - if (buf_size + s->buf_ptr - s->buffer <= s->buffer_size || s->seekable || !s->read_packet) -diff --git a/libavformat/bfi.c b/libavformat/bfi.c -index 35b6816aad..f9e0bb2e30 100644 ---- a/libavformat/bfi.c -+++ b/libavformat/bfi.c -@@ -140,12 +140,12 @@ static int bfi_read_packet(AVFormatContext * s, AVPacket * pkt) - audio_offset = avio_rl32(pb); - avio_rl32(pb); - video_offset = avio_rl32(pb); -- if (audio_offset < 0 || video_offset < audio_offset || chunk_size < video_offset) { -+ audio_size = video_offset - audio_offset; -+ bfi->video_size = chunk_size - video_offset; -+ if (audio_size < 0 || bfi->video_size < 0) { - av_log(s, AV_LOG_ERROR, "Invalid audio/video offsets or chunk size\n"); - return AVERROR_INVALIDDATA; - } -- audio_size = video_offset - audio_offset; -- bfi->video_size = chunk_size - video_offset; - - //Tossing an audio packet at the audio decoder. 
- ret = av_get_packet(pb, pkt, audio_size); -diff --git a/libavformat/cafdec.c b/libavformat/cafdec.c -index 1842c3c0ae..7f09a27977 100644 ---- a/libavformat/cafdec.c -+++ b/libavformat/cafdec.c -@@ -241,8 +241,6 @@ static void read_info_chunk(AVFormatContext *s, int64_t size) - char value[1024]; - avio_get_str(pb, INT_MAX, key, sizeof(key)); - avio_get_str(pb, INT_MAX, value, sizeof(value)); -- if (!*key) -- continue; - av_dict_set(&s->metadata, key, value, 0); - } - } -@@ -342,7 +340,7 @@ static int read_header(AVFormatContext *s) - - found_data: - if (caf->bytes_per_packet > 0 && caf->frames_per_packet > 0) { -- if (caf->data_size > 0 && caf->data_size / caf->bytes_per_packet < INT64_MAX / caf->frames_per_packet) -+ if (caf->data_size > 0) - st->nb_frames = (caf->data_size / caf->bytes_per_packet) * caf->frames_per_packet; - } else if (st->nb_index_entries && st->duration > 0) { - if (st->codecpar->sample_rate && caf->data_size / st->duration > INT64_MAX / st->codecpar->sample_rate / 8) { -diff --git a/libavformat/cafenc.c b/libavformat/cafenc.c -index c5e47f20a6..7e44797a52 100644 ---- a/libavformat/cafenc.c -+++ b/libavformat/cafenc.c -@@ -28,6 +28,7 @@ - - typedef struct { - int64_t data; -+ uint8_t *pkt_sizes; - int size_buffer_size; - int size_entries_used; - int packets; -@@ -208,29 +209,30 @@ static int caf_write_header(AVFormatContext *s) - static int caf_write_packet(AVFormatContext *s, AVPacket *pkt) - { - CAFContext *caf = s->priv_data; -- AVStream *const st = s->streams[0]; - -- if (!st->codecpar->block_align) { -- uint8_t *pkt_sizes; -- int i, alloc_size = caf->size_entries_used + 5U; -- if (alloc_size < 0) -- return AVERROR(ERANGE); -- -- pkt_sizes = av_fast_realloc(st->priv_data, -- &caf->size_buffer_size, -- alloc_size); -- if (!pkt_sizes) -+ avio_write(s->pb, pkt->data, pkt->size); -+ if (!s->streams[0]->codecpar->block_align) { -+ void *pkt_sizes = caf->pkt_sizes; -+ int i, alloc_size = caf->size_entries_used + 5; -+ if (alloc_size < 0) { -+ caf->pkt_sizes = NULL; -+ } else { -+ caf->pkt_sizes = av_fast_realloc(caf->pkt_sizes, -+ &caf->size_buffer_size, -+ alloc_size); -+ } -+ if (!caf->pkt_sizes) { -+ av_free(pkt_sizes); - return AVERROR(ENOMEM); -- st->priv_data = pkt_sizes; -+ } - for (i = 4; i > 0; i--) { - unsigned top = pkt->size >> i * 7; - if (top) -- pkt_sizes[caf->size_entries_used++] = 128 | top; -+ caf->pkt_sizes[caf->size_entries_used++] = 128 | top; - } -- pkt_sizes[caf->size_entries_used++] = pkt->size & 127; -+ caf->pkt_sizes[caf->size_entries_used++] = pkt->size & 127; - caf->packets++; - } -- avio_write(s->pb, pkt->data, pkt->size); - return 0; - } - -@@ -238,8 +240,7 @@ static int caf_write_trailer(AVFormatContext *s) - { - CAFContext *caf = s->priv_data; - AVIOContext *pb = s->pb; -- AVStream *st = s->streams[0]; -- AVCodecParameters *par = st->codecpar; -+ AVCodecParameters *par = s->streams[0]->codecpar; - - if (pb->seekable & AVIO_SEEKABLE_NORMAL) { - int64_t file_size = avio_tell(pb); -@@ -249,14 +250,16 @@ static int caf_write_trailer(AVFormatContext *s) - avio_seek(pb, file_size, SEEK_SET); - if (!par->block_align) { - ffio_wfourcc(pb, "pakt"); -- avio_wb64(pb, caf->size_entries_used + 24U); -+ avio_wb64(pb, caf->size_entries_used + 24); - avio_wb64(pb, caf->packets); ///< mNumberPackets - avio_wb64(pb, caf->packets * samples_per_packet(par->codec_id, par->channels, par->block_align)); ///< mNumberValidFrames - avio_wb32(pb, 0); ///< mPrimingFrames - avio_wb32(pb, 0); ///< mRemainderFrames -- avio_write(pb, st->priv_data, 
caf->size_entries_used); -+ avio_write(pb, caf->pkt_sizes, caf->size_entries_used); -+ caf->size_buffer_size = 0; - } - } -+ av_freep(&caf->pkt_sizes); - return 0; - } - -diff --git a/libavformat/dxa.c b/libavformat/dxa.c -index 2a5487710f..cd9c489851 100644 ---- a/libavformat/dxa.c -+++ b/libavformat/dxa.c -@@ -118,12 +118,9 @@ static int dxa_read_header(AVFormatContext *s) - if(tag == MKTAG('d', 'a', 't', 'a')) break; - avio_skip(pb, fsize); - } -- c->bpc = (fsize + (int64_t)c->frames - 1) / c->frames; -- if(ast->codecpar->block_align) { -- if (c->bpc > INT_MAX - ast->codecpar->block_align + 1) -- return AVERROR_INVALIDDATA; -+ c->bpc = (fsize + c->frames - 1) / c->frames; -+ if(ast->codecpar->block_align) - c->bpc = ((c->bpc + ast->codecpar->block_align - 1) / ast->codecpar->block_align) * ast->codecpar->block_align; -- } - c->bytes_left = fsize; - c->wavpos = avio_tell(pb); - avio_seek(pb, c->vidpos, SEEK_SET); -diff --git a/libavformat/flvdec.c b/libavformat/flvdec.c -index 4a1c01a714..79c810f963 100644 ---- a/libavformat/flvdec.c -+++ b/libavformat/flvdec.c -@@ -64,7 +64,7 @@ typedef struct FLVContext { - uint8_t resync_buffer[2*RESYNC_BUFFER_SIZE]; - - int broken_sizes; -- int64_t sum_flv_tag_size; -+ int sum_flv_tag_size; - - int last_keyframe_stream_index; - int keyframe_count; -@@ -459,10 +459,6 @@ static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc, int64_t m - d = av_int2double(avio_rb64(ioc)); - if (isnan(d) || d < INT64_MIN || d > INT64_MAX) - goto invalid; -- if (current_array == × && (d <= INT64_MIN / 1000 || d >= INT64_MAX / 1000)) -- goto invalid; -- if (avio_feof(ioc)) -- goto invalid; - current_array[0][i] = d; - } - if (times && filepositions) { -@@ -1033,7 +1029,7 @@ retry: - type = (avio_r8(s->pb) & 0x1F); - orig_size = - size = avio_rb24(s->pb); -- flv->sum_flv_tag_size += size + 11LL; -+ flv->sum_flv_tag_size += size + 11; - dts = avio_rb24(s->pb); - dts |= (unsigned)avio_r8(s->pb) << 24; - av_log(s, AV_LOG_TRACE, "type:%d, size:%d, last:%d, dts:%"PRId64" pos:%"PRId64"\n", type, size, last, dts, avio_tell(s->pb)); -@@ -1335,7 +1331,7 @@ leave: - !avio_feof(s->pb) && - (last != orig_size || !last) && last != flv->sum_flv_tag_size && - !flv->broken_sizes) { -- av_log(s, AV_LOG_ERROR, "Packet mismatch %d %d %"PRId64"\n", last, orig_size + 11, flv->sum_flv_tag_size); -+ av_log(s, AV_LOG_ERROR, "Packet mismatch %d %d %d\n", last, orig_size + 11, flv->sum_flv_tag_size); - avio_seek(s->pb, pos + 1, SEEK_SET); - ret = resync(s); - av_packet_unref(pkt); -diff --git a/libavformat/genh.c b/libavformat/genh.c -index 0b55a8884a..698104a9d6 100644 ---- a/libavformat/genh.c -+++ b/libavformat/genh.c -@@ -67,9 +67,6 @@ static int genh_read_header(AVFormatContext *s) - return AVERROR_INVALIDDATA; - st->codecpar->block_align = align * st->codecpar->channels; - st->codecpar->sample_rate = avio_rl32(s->pb); -- if (st->codecpar->sample_rate < 0) +- if (size < 24) - return AVERROR_INVALIDDATA; - - avio_skip(s->pb, 4); - st->duration = avio_rl32(s->pb); + // simple index objects should be ordered by stream number, this loop tries to find + // the first not indexed video stream + for (i = 0; i < asf->nb_streams; i++) { +diff --git a/libavformat/avr.c b/libavformat/avr.c +index a79898f146..c4ce70142a 100644 +--- a/libavformat/avr.c ++++ b/libavformat/avr.c +@@ -70,9 +70,6 @@ static int avr_read_header(AVFormatContext *s) + avio_skip(s->pb, 1); // replay speed + st->codecpar->sample_rate = avio_rb24(s->pb); +- if (st->codecpar->sample_rate == 0) +- return 
AVERROR_INVALIDDATA; +- + avio_skip(s->pb, 4 * 3); + avio_skip(s->pb, 2 * 3); + avio_skip(s->pb, 20); +diff --git a/libavformat/avs.c b/libavformat/avs.c +index 88694e951b..097c171908 100644 +--- a/libavformat/avs.c ++++ b/libavformat/avs.c +@@ -140,10 +140,6 @@ static int avs_read_audio_packet(AVFormatContext * s, AVPacket * pkt) + return 0; /* this indicate EOS */ + if (ret < 0) + return ret; +- if (size != (int)size) { +- av_packet_unref(pkt); +- return AVERROR(EDOM); +- } + + pkt->stream_index = avs->st_audio->index; + pkt->flags |= AV_PKT_FLAG_KEY; +diff --git a/libavformat/bintext.c b/libavformat/bintext.c +index f0c365b7be..bc0f6bd099 100644 +--- a/libavformat/bintext.c ++++ b/libavformat/bintext.c +@@ -90,12 +90,9 @@ static int next_tag_read(AVFormatContext *avctx, uint64_t *fsize) + AVIOContext *pb = avctx->pb; + char buf[36]; + int len; +- int64_t start_pos = avio_size(pb); ++ uint64_t start_pos = avio_size(pb) - 256; + +- if (start_pos < 256) +- return AVERROR_INVALIDDATA; +- +- avio_seek(pb, start_pos - 256, SEEK_SET); ++ avio_seek(pb, start_pos, SEEK_SET); + if (avio_read(pb, buf, sizeof(next_magic)) != sizeof(next_magic)) + return -1; + if (memcmp(buf, next_magic, sizeof(next_magic))) +@@ -253,10 +250,7 @@ static int xbin_read_header(AVFormatContext *s) + return AVERROR(EIO); + + if (pb->seekable & AVIO_SEEKABLE_NORMAL) { +- int64_t fsize = avio_size(pb); +- if (fsize < 9 + st->codecpar->extradata_size) +- return 0; +- bin->fsize = fsize - 9 - st->codecpar->extradata_size; ++ bin->fsize = avio_size(pb) - 9 - st->codecpar->extradata_size; + ff_sauce_read(s, &bin->fsize, NULL, 0); + avio_seek(pb, 9 + st->codecpar->extradata_size, SEEK_SET); + } +@@ -296,10 +290,7 @@ static int adf_read_header(AVFormatContext *s) + + if (pb->seekable & AVIO_SEEKABLE_NORMAL) { + int got_width = 0; +- int64_t fsize = avio_size(pb); +- if (fsize < 1 + 192 + 4096) +- return 0; +- bin->fsize = fsize - 1 - 192 - 4096; ++ bin->fsize = avio_size(pb) - 1 - 192 - 4096; + st->codecpar->width = 80<<3; + ff_sauce_read(s, &bin->fsize, &got_width, 0); + if (st->codecpar->width < 8) +@@ -332,7 +323,6 @@ static int idf_read_header(AVFormatContext *s) + AVIOContext *pb = s->pb; + AVStream *st; + int got_width = 0, ret; +- int64_t fsize; + + if (!(pb->seekable & AVIO_SEEKABLE_NORMAL)) + return AVERROR(EIO); +@@ -347,18 +337,14 @@ static int idf_read_header(AVFormatContext *s) + st->codecpar->extradata[0] = 16; + st->codecpar->extradata[1] = BINTEXT_PALETTE|BINTEXT_FONT; + +- fsize = avio_size(pb); +- if (fsize < 12 + 4096 + 48) +- return AVERROR_INVALIDDATA; +- bin->fsize = fsize - 12 - 4096 - 48; +- +- avio_seek(pb, bin->fsize + 12, SEEK_SET); ++ avio_seek(pb, avio_size(pb) - 4096 - 48, SEEK_SET); + + if (avio_read(pb, st->codecpar->extradata + 2 + 48, 4096) < 0) + return AVERROR(EIO); + if (avio_read(pb, st->codecpar->extradata + 2, 48) < 0) + return AVERROR(EIO); + ++ bin->fsize = avio_size(pb) - 12 - 4096 - 48; + ff_sauce_read(s, &bin->fsize, &got_width, 0); + if (st->codecpar->width < 8) + return AVERROR_INVALIDDATA; +diff --git a/libavformat/cafdec.c b/libavformat/cafdec.c +index a33b405f05..1842c3c0ae 100644 +--- a/libavformat/cafdec.c ++++ b/libavformat/cafdec.c +@@ -220,7 +220,7 @@ static int read_pakt_chunk(AVFormatContext *s, int64_t size) + } + } + +- if (avio_tell(pb) - ccount > size || size > INT64_MAX - ccount) { ++ if (avio_tell(pb) - ccount > size) { + av_log(s, AV_LOG_ERROR, "error reading packet table\n"); + return AVERROR_INVALIDDATA; + } +@@ -292,9 +292,6 @@ static int 
read_header(AVFormatContext *s) + avio_skip(pb, 4); /* edit count */ + caf->data_start = avio_tell(pb); + caf->data_size = size < 0 ? -1 : size - 4; +- if (caf->data_start < 0 || caf->data_size > INT64_MAX - caf->data_start) +- return AVERROR_INVALIDDATA; +- + if (caf->data_size > 0 && (pb->seekable & AVIO_SEEKABLE_NORMAL)) + avio_skip(pb, caf->data_size); + found_data = 1; +diff --git a/libavformat/concatdec.c b/libavformat/concatdec.c +index d45e52d7c6..32d4a99010 100644 +--- a/libavformat/concatdec.c ++++ b/libavformat/concatdec.c +@@ -317,7 +317,7 @@ static int64_t get_best_effort_duration(ConcatFile *file, AVFormatContext *avf) + if (file->user_duration != AV_NOPTS_VALUE) + return file->user_duration; + if (file->outpoint != AV_NOPTS_VALUE) +- return av_sat_sub64(file->outpoint, file->file_inpoint); ++ return file->outpoint - file->file_inpoint; + if (avf->duration > 0) + return avf->duration - (file->file_inpoint - file->file_start_time); + if (file->next_dts != AV_NOPTS_VALUE) +@@ -494,15 +494,11 @@ static int concat_read_header(AVFormatContext *avf) + else + time = cat->files[i].start_time; + if (cat->files[i].user_duration == AV_NOPTS_VALUE) { +- if (cat->files[i].inpoint == AV_NOPTS_VALUE || cat->files[i].outpoint == AV_NOPTS_VALUE || +- cat->files[i].outpoint - (uint64_t)cat->files[i].inpoint != av_sat_sub64(cat->files[i].outpoint, cat->files[i].inpoint) +- ) ++ if (cat->files[i].inpoint == AV_NOPTS_VALUE || cat->files[i].outpoint == AV_NOPTS_VALUE) + break; + cat->files[i].user_duration = cat->files[i].outpoint - cat->files[i].inpoint; + } + cat->files[i].duration = cat->files[i].user_duration; +- if (time + (uint64_t)cat->files[i].user_duration > INT64_MAX) +- return AVERROR_INVALIDDATA; + time += cat->files[i].user_duration; + } + if (i == cat->nb_files) { +diff --git a/libavformat/dxa.c b/libavformat/dxa.c +index e815f8a540..2a5487710f 100644 +--- a/libavformat/dxa.c ++++ b/libavformat/dxa.c +@@ -122,7 +122,7 @@ static int dxa_read_header(AVFormatContext *s) + if(ast->codecpar->block_align) { + if (c->bpc > INT_MAX - ast->codecpar->block_align + 1) + return AVERROR_INVALIDDATA; +- c->bpc = ((c->bpc - 1 + ast->codecpar->block_align) / ast->codecpar->block_align) * ast->codecpar->block_align; ++ c->bpc = ((c->bpc + ast->codecpar->block_align - 1) / ast->codecpar->block_align) * ast->codecpar->block_align; + } + c->bytes_left = fsize; + c->wavpos = avio_tell(pb); +diff --git a/libavformat/flacdec.c b/libavformat/flacdec.c +index f11aecce3c..1463e1caa8 100644 +--- a/libavformat/flacdec.c ++++ b/libavformat/flacdec.c +@@ -68,7 +68,7 @@ static int flac_read_header(AVFormatContext *s) + /* process metadata blocks */ + while (!avio_feof(s->pb) && !metadata_last) { + if (avio_read(s->pb, header, 4) != 4) +- return AVERROR_INVALIDDATA; ++ return AVERROR(AVERROR_INVALIDDATA); + flac_parse_block_header(header, &metadata_last, &metadata_type, + &metadata_size); + switch (metadata_type) { +diff --git a/libavformat/format.c b/libavformat/format.c +index 109aa4c92e..c47490c8eb 100644 +--- a/libavformat/format.c ++++ b/libavformat/format.c +@@ -228,7 +228,6 @@ int av_probe_input_buffer2(AVIOContext *pb, ff_const59 AVInputFormat **fmt, + int ret = 0, probe_size, buf_offset = 0; + int score = 0; + int ret2; +- int eof = 0; + + if (!max_probe_size) + max_probe_size = PROBE_BUF_MAX; +@@ -252,7 +251,7 @@ int av_probe_input_buffer2(AVIOContext *pb, ff_const59 AVInputFormat **fmt, + } + } + +- for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt && !eof; ++ for (probe_size = 
PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt; + probe_size = FFMIN(probe_size << 1, + FFMAX(max_probe_size, probe_size + 1))) { + score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0; +@@ -268,7 +267,6 @@ int av_probe_input_buffer2(AVIOContext *pb, ff_const59 AVInputFormat **fmt, + + score = 0; + ret = 0; /* error was end of file, nothing read */ +- eof = 1; + } + buf_offset += ret; + if (buf_offset < offset) +diff --git a/libavformat/fwse.c b/libavformat/fwse.c +index 2fecd68e56..00e2e13b11 100644 +--- a/libavformat/fwse.c ++++ b/libavformat/fwse.c +@@ -67,7 +67,7 @@ static int fwse_read_header(AVFormatContext *s) + par->channel_layout = AV_CH_LAYOUT_STEREO; + st->duration = avio_rl32(pb); + par->sample_rate = avio_rl32(pb); +- if (par->sample_rate <= 0) ++ if (par->sample_rate <= 0 || par->sample_rate > INT_MAX) + return AVERROR_INVALIDDATA; + + par->block_align = 1; diff --git a/libavformat/hls.c b/libavformat/hls.c -index e17cb23897..597bea7f25 100644 +index 1b9f7d9e12..e17cb23897 100644 --- a/libavformat/hls.c +++ b/libavformat/hls.c -@@ -236,7 +236,6 @@ static void free_init_section_list(struct playlist *pls) - { - int i; - for (i = 0; i < pls->n_init_sections; i++) { -- av_freep(&pls->init_sections[i]->key); - av_freep(&pls->init_sections[i]->url); - av_freep(&pls->init_sections[i]); - } -@@ -811,26 +810,20 @@ static int parse_playlist(HLSContext *c, const char *url, - &info); - new_rendition(c, &info, url); - } else if (av_strstart(line, "#EXT-X-TARGETDURATION:", &ptr)) { -- int64_t t; - ret = ensure_playlist(c, &pls, url); - if (ret < 0) - goto fail; -- t = strtoll(ptr, NULL, 10); -- if (t < 0 || t >= INT64_MAX / AV_TIME_BASE) { -- ret = AVERROR_INVALIDDATA; -- goto fail; -- } -- pls->target_duration = t * AV_TIME_BASE; -+ pls->target_duration = strtoll(ptr, NULL, 10) * AV_TIME_BASE; - } else if (av_strstart(line, "#EXT-X-MEDIA-SEQUENCE:", &ptr)) { - uint64_t seq_no; - ret = ensure_playlist(c, &pls, url); - if (ret < 0) - goto fail; - seq_no = strtoull(ptr, NULL, 10); -- if (seq_no > INT64_MAX/2) { -+ if (seq_no > INT64_MAX) { - av_log(c->ctx, AV_LOG_DEBUG, "MEDIA-SEQUENCE higher than " -- "INT64_MAX/2, mask out the highest bit\n"); -- seq_no &= INT64_MAX/2; -+ "INT64_MAX, mask out the highest bit\n"); -+ seq_no &= INT64_MAX; - } - pls->start_seq_no = seq_no; - } else if (av_strstart(line, "#EXT-X-PLAYLIST-TYPE:", &ptr)) { -@@ -910,7 +903,7 @@ static int parse_playlist(HLSContext *c, const char *url, - if (has_iv) { - memcpy(seg->iv, iv, sizeof(iv)); - } else { -- uint64_t seq = pls->start_seq_no + (uint64_t)pls->n_segments; -+ int64_t seq = pls->start_seq_no + pls->n_segments; - memset(seg->iv, 0, sizeof(seg->iv)); - AV_WB64(seg->iv + 8, seq); +@@ -2397,7 +2397,7 @@ static const AVOption hls_options[] = { + {.str = "3gp,aac,avi,ac3,eac3,flac,mkv,m3u8,m4a,m4s,m4v,mpg,mov,mp2,mp3,mp4,mpeg,mpegts,ogg,ogv,oga,ts,vob,wav"}, + INT_MIN, INT_MAX, FLAGS}, + {"max_reload", "Maximum number of times a insufficient list is attempted to be reloaded", +- OFFSET(max_reload), AV_OPT_TYPE_INT, {.i64 = 3}, 0, INT_MAX, FLAGS}, ++ OFFSET(max_reload), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, FLAGS}, + {"m3u8_hold_counters", "The maximum number of times to load m3u8 when it refreshes without new segments", + OFFSET(m3u8_hold_counters), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, FLAGS}, + {"http_persistent", "Use persistent HTTP connections", +diff --git a/libavformat/hlsenc.c b/libavformat/hlsenc.c +index 3f3ab8844d..e222b70ffa 100644 +--- a/libavformat/hlsenc.c ++++ 
b/libavformat/hlsenc.c +@@ -412,11 +412,8 @@ static void write_codec_attr(AVStream *st, VariantStream *vs) + } else if (st->codecpar->codec_id == AV_CODEC_ID_MP3) { + snprintf(attr, sizeof(attr), "mp4a.40.34"); + } else if (st->codecpar->codec_id == AV_CODEC_ID_AAC) { +- if (st->codecpar->profile != FF_PROFILE_UNKNOWN) +- snprintf(attr, sizeof(attr), "mp4a.40.%d", st->codecpar->profile+1); +- else +- // This is for backward compatibility with the previous implementation. +- snprintf(attr, sizeof(attr), "mp4a.40.2"); ++ /* TODO : For HE-AAC, HE-AACv2, the last digit needs to be set to 5 and 29 respectively */ ++ snprintf(attr, sizeof(attr), "mp4a.40.2"); + } else if (st->codecpar->codec_id == AV_CODEC_ID_AC3) { + snprintf(attr, sizeof(attr), "ac-3"); + } else if (st->codecpar->codec_id == AV_CODEC_ID_EAC3) { +@@ -2596,10 +2593,8 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) + " will retry with a new http session.\n"); + ff_format_io_close(s, &vs->out); + ret = hlsenc_io_open(s, &vs->out, filename, &options); +- if (ret >= 0) { +- reflush_dynbuf(vs, &range_length); +- ret = hlsenc_io_close(s, &vs->out, filename); +- } ++ reflush_dynbuf(vs, &range_length); ++ ret = hlsenc_io_close(s, &vs->out, filename); } -diff --git a/libavformat/icodec.c b/libavformat/icodec.c -index b321ad6007..93179bb41e 100644 ---- a/libavformat/icodec.c -+++ b/libavformat/icodec.c -@@ -203,9 +203,6 @@ static int read_packet(AVFormatContext *s, AVPacket *pkt) - AV_WL32(buf + 32, image->nb_pal); + av_dict_free(&options); + av_freep(&vs->temp_buffer); +@@ -2610,9 +2605,6 @@ static int hls_write_packet(AVFormatContext *s, AVPacket *pkt) + hls_rename_temp_file(s, oc); } -- if (image->nb_pal > INT_MAX / 4 - 14 - 40) -- return AVERROR_INVALIDDATA; +- if (ret < 0) +- return ret; - - AV_WL32(buf - 4, 14 + 40 + image->nb_pal * 4); - AV_WL32(buf + 8, AV_RL32(buf + 8) / 2); - } + old_filename = av_strdup(oc->url); + if (!old_filename) { + return AVERROR(ENOMEM); +diff --git a/libavformat/hnm.c b/libavformat/hnm.c +index 2cec5cb876..f06add5cf8 100644 +--- a/libavformat/hnm.c ++++ b/libavformat/hnm.c +@@ -113,8 +113,6 @@ static int hnm_read_packet(AVFormatContext *s, AVPacket *pkt) + if (hnm->superchunk_remaining == 0) { + /* parse next superchunk */ + superchunk_size = avio_rl24(pb); +- if (superchunk_size < 4) +- return AVERROR_INVALIDDATA; + avio_skip(pb, 1); + + hnm->superchunk_remaining = superchunk_size - 4; +@@ -125,7 +123,7 @@ static int hnm_read_packet(AVFormatContext *s, AVPacket *pkt) + chunk_id = avio_rl16(pb); + avio_skip(pb, 2); + +- if (chunk_size > hnm->superchunk_remaining || chunk_size < 8) { ++ if (chunk_size > hnm->superchunk_remaining || !chunk_size) { + av_log(s, AV_LOG_ERROR, + "invalid chunk size: %"PRIu32", offset: %"PRId64"\n", + chunk_size, avio_tell(pb)); diff --git a/libavformat/id3v2.c b/libavformat/id3v2.c -index a40f858477..1377cef4b8 100644 +index 9398897873..1377cef4b8 100644 --- a/libavformat/id3v2.c +++ b/libavformat/id3v2.c +@@ -365,7 +365,7 @@ static void read_uslt(AVFormatContext *s, AVIOContext *pb, int taglen, + int encoding; + int ok = 0; + +- if (taglen < 4) ++ if (taglen < 1) + goto error; + + encoding = avio_r8(pb); @@ -376,10 +376,10 @@ static void read_uslt(AVFormatContext *s, AVIOContext *pb, int taglen, lang[3] = '\0'; taglen -= 3; @@ -69797,235 +69944,225 @@ index a40f858477..1377cef4b8 100644 // FFmpeg does not support hierarchical metadata, so concatenate the keys. 
diff --git a/libavformat/iff.c b/libavformat/iff.c -index 06785c748b..c15302d3c5 100644 +index caa0ef882b..06785c748b 100644 --- a/libavformat/iff.c +++ b/libavformat/iff.c -@@ -385,7 +385,7 @@ static int read_dst_frame(AVFormatContext *s, AVPacket *pkt) - avio_skip(pb, 1); - pkt->flags |= AV_PKT_FLAG_KEY; - pkt->stream_index = 0; -- pkt->duration = s->streams[0]->codecpar->sample_rate / 75; -+ pkt->duration = 588LL * s->streams[0]->codecpar->sample_rate / 44100; - pkt->pos = chunk_pos; +@@ -217,7 +217,7 @@ static int parse_dsd_diin(AVFormatContext *s, AVStream *st, uint64_t eof) + { + AVIOContext *pb = s->pb; - chunk_pos = avio_tell(pb); -@@ -398,8 +398,7 @@ static int read_dst_frame(AVFormatContext *s, AVPacket *pkt) - case ID_FRTE: - if (data_size < 4) - return AVERROR_INVALIDDATA; -- s->streams[0]->duration = avio_rb32(pb) * (uint64_t)s->streams[0]->codecpar->sample_rate / 75; -- -+ s->streams[0]->duration = avio_rb32(pb) * 588LL * s->streams[0]->codecpar->sample_rate / 44100; +- while (av_sat_add64(avio_tell(pb), 12) <= eof && !avio_feof(pb)) { ++ while (avio_tell(pb) + 12 <= eof && !avio_feof(pb)) { + uint32_t tag = avio_rl32(pb); + uint64_t size = avio_rb64(pb); + uint64_t orig_pos = avio_tell(pb); +@@ -254,7 +254,7 @@ static int parse_dsd_prop(AVFormatContext *s, AVStream *st, uint64_t eof) + int dsd_layout[6]; + ID3v2ExtraMeta *id3v2_extra_meta; + +- while (av_sat_add64(avio_tell(pb), 12) <= eof && !avio_feof(pb)) { ++ while (avio_tell(pb) + 12 <= eof && !avio_feof(pb)) { + uint32_t tag = avio_rl32(pb); + uint64_t size = avio_rb64(pb); + uint64_t orig_pos = avio_tell(pb); +diff --git a/libavformat/img2dec.c b/libavformat/img2dec.c +index d8ffd7fb0b..65e3c9b1a3 100644 +--- a/libavformat/img2dec.c ++++ b/libavformat/img2dec.c +@@ -23,7 +23,6 @@ + #define _DEFAULT_SOURCE + #define _BSD_SOURCE + #include +-#include "libavutil/avassert.h" + #include "libavutil/avstring.h" + #include "libavutil/log.h" + #include "libavutil/opt.h" +@@ -498,7 +497,6 @@ int ff_img_read_packet(AVFormatContext *s1, AVPacket *pkt) + pkt->flags |= AV_PKT_FLAG_KEY; + if (s->ts_from_file) { + struct stat img_stat; +- av_assert0(!s->is_pipe); // The ts_from_file option is not supported by piped input demuxers + if (stat(filename, &img_stat)) { + res = AVERROR(EIO); + goto fail; +@@ -789,6 +787,7 @@ static int jpeg_probe(const AVProbeData *p) + return 0; + state = EOI; + break; ++ case DQT: + case APP0: + case APP1: + case APP2: +@@ -805,7 +804,6 @@ static int jpeg_probe(const AVProbeData *p) + case APP13: + case APP14: + case APP15: +- case DQT: /* fallthrough */ + case COM: + i += AV_RB16(&b[i + 2]) + 1; break; - } - -@@ -502,9 +501,6 @@ static int iff_read_header(AVFormatContext *s) - case ID_DST: - case ID_MDAT: - iff->body_pos = avio_tell(pb); -- if (iff->body_pos < 0 || iff->body_pos + data_size > INT64_MAX) -- return AVERROR_INVALIDDATA; -- - iff->body_end = iff->body_pos + data_size; - iff->body_size = data_size; - if (chunk_id == ID_DST) { diff --git a/libavformat/jacosubdec.c b/libavformat/jacosubdec.c -index 59544bb507..2ccbf4c9de 100644 +index a0d6b17e32..59544bb507 100644 --- a/libavformat/jacosubdec.c +++ b/libavformat/jacosubdec.c -@@ -152,7 +152,7 @@ static int get_shift(int timeres, const char *buf) +@@ -132,35 +132,35 @@ shift_and_ret: + return buf + len; + } + +-static int get_shift(unsigned timeres, const char *buf) ++static int get_shift(int timeres, const char *buf) + { + int sign = 1; +- int h = 0, m = 0, s = 0, d = 0; ++ int a = 0, b = 0, c = 0, d = 0; + int64_t ret; + #define SSEP 
"%*1[.:]" +- int n = sscanf(buf, "%d"SSEP"%d"SSEP"%d"SSEP"%d", &h, &m, &s, &d); ++ int n = sscanf(buf, "%d"SSEP"%d"SSEP"%d"SSEP"%d", &a, &b, &c, &d); + #undef SSEP + +- if (h == INT_MIN) ++ if (a == INT_MIN) + return 0; + +- if (*buf == '-' || h < 0) { ++ if (*buf == '-' || a < 0) { + sign = -1; +- h = FFABS(h); ++ a = FFABS(a); + } + ret = 0; switch (n) { - case 4: -- ret = sign * (((int64_t)a*3600 + (int64_t)b*60 + c) * timeres + d); -+ ret = sign * (((int64_t)a*3600 + b*60 + c) * timeres + d); - break; - case 3: - ret = sign * (( (int64_t)a*60 + b) * timeres + c); -diff --git a/libavformat/jacosubenc.c b/libavformat/jacosubenc.c -index 1213a58d52..77575c6b3c 100644 ---- a/libavformat/jacosubenc.c -+++ b/libavformat/jacosubenc.c -@@ -24,7 +24,7 @@ static int jacosub_write_header(AVFormatContext *s) - const AVCodecParameters *par = s->streams[0]->codecpar; - - if (par->extradata_size) { -- avio_write(s->pb, par->extradata, par->extradata_size); -+ avio_write(s->pb, par->extradata, par->extradata_size - 1); +- case 1: h = 0; //clear all in case of a single parameter +- case 2: s = m; m = h; h = 0; //shift into second subsecondd +- case 3: d = s; s = m; m = h; h = 0; //shift into minute second subsecond ++ case 4: ++ ret = sign * (((int64_t)a*3600 + (int64_t)b*60 + c) * timeres + d); ++ break; ++ case 3: ++ ret = sign * (( (int64_t)a*60 + b) * timeres + c); ++ break; ++ case 2: ++ ret = sign * (( (int64_t)a) * timeres + b); ++ break; } - return 0; - } +- +- ret = (int64_t)h*3600 + (int64_t)m*60 + s; +- if (FFABS(ret) > (INT64_MAX - FFABS((int64_t)d)) / timeres) +- return 0; +- ret = sign * (ret * timeres + d); +- + if ((int)ret != ret) + ret = 0; + +@@ -236,17 +236,14 @@ static int jacosub_read_header(AVFormatContext *s) + } + av_bprintf(&header, "#S %s", p); + break; +- case 'T': { // ...but must be placed after TIMERES +- int64_t timeres = strtol(p, NULL, 10); +- if (timeres <= 0 || timeres > UINT32_MAX) { ++ case 'T': // ...but must be placed after TIMERES ++ jacosub->timeres = strtol(p, NULL, 10); ++ if (!jacosub->timeres) + jacosub->timeres = 30; +- } else { +- jacosub->timeres = timeres; ++ else + av_bprintf(&header, "#T %s", p); +- } + break; + } +- } + } + + /* general/essential directives in the extradata */ +diff --git a/libavformat/kvag.c b/libavformat/kvag.c +index 64574905f0..91d1d8a518 100644 +--- a/libavformat/kvag.c ++++ b/libavformat/kvag.c +@@ -31,7 +31,7 @@ + typedef struct KVAGHeader { + uint32_t magic; + uint32_t data_size; +- int sample_rate; ++ uint32_t sample_rate; + uint16_t stereo; + } KVAGHeader; + +@@ -65,9 +65,6 @@ static int kvag_read_header(AVFormatContext *s) + hdr.sample_rate = AV_RL32(buf + 8); + hdr.stereo = AV_RL16(buf + 12); + +- if (hdr.sample_rate <= 0) +- return AVERROR_INVALIDDATA; +- + par = st->codecpar; + par->codec_type = AVMEDIA_TYPE_AUDIO; + par->codec_id = AV_CODEC_ID_ADPCM_IMA_SSI; diff --git a/libavformat/libzmq.c b/libavformat/libzmq.c -index 04c72ac601..1b0d8638db 100644 +index f4bb849e46..04c72ac601 100644 --- a/libavformat/libzmq.c +++ b/libavformat/libzmq.c -@@ -51,7 +51,7 @@ static int zmq_proto_wait(URLContext *h, void *socket, int write) - zmq_pollitem_t items = { .socket = socket, .fd = 0, .events = ev, .revents = 0 }; - ret = zmq_poll(&items, 1, POLLING_TIME); - if (ret == -1) { -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_poll(): %s\n", ZMQ_STRERROR); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_poll(): %s\n", ZMQ_STRERROR); - return AVERROR_EXTERNAL; - } - return items.revents & ev ? 
0 : AVERROR(EAGAIN); -@@ -90,7 +90,7 @@ static int zmq_proto_open(URLContext *h, const char *uri, int flags) - s->context = zmq_ctx_new(); - if (!s->context) { - /*errno not set on failure during zmq_ctx_new()*/ -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_ctx_new()\n"); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_ctx_new()\n"); +@@ -94,10 +94,7 @@ static int zmq_proto_open(URLContext *h, const char *uri, int flags) return AVERROR_EXTERNAL; } -@@ -100,13 +100,13 @@ static int zmq_proto_open(URLContext *h, const char *uri, int flags) +- if (av_strstart(uri, "zmq:", &uri)) { +- av_log(h, AV_LOG_ERROR, "URL %s lacks prefix\n", uri); +- return AVERROR(EINVAL); +- } ++ av_strstart(uri, "zmq:", &uri); + + /*publish during write*/ if (h->flags & AVIO_FLAG_WRITE) { - s->socket = zmq_socket(s->context, ZMQ_PUB); - if (!s->socket) { -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_socket(): %s\n", ZMQ_STRERROR); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_socket(): %s\n", ZMQ_STRERROR); - goto fail_term; - } - - ret = zmq_bind(s->socket, uri); - if (ret == -1) { -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_bind(): %s\n", ZMQ_STRERROR); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_bind(): %s\n", ZMQ_STRERROR); - goto fail_close; - } - } -@@ -115,19 +115,19 @@ static int zmq_proto_open(URLContext *h, const char *uri, int flags) - if (h->flags & AVIO_FLAG_READ) { - s->socket = zmq_socket(s->context, ZMQ_SUB); - if (!s->socket) { -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_socket(): %s\n", ZMQ_STRERROR); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_socket(): %s\n", ZMQ_STRERROR); - goto fail_term; - } - - ret = zmq_setsockopt(s->socket, ZMQ_SUBSCRIBE, "", 0); - if (ret == -1) { -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_setsockopt(): %s\n", ZMQ_STRERROR); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_setsockopt(): %s\n", ZMQ_STRERROR); - goto fail_close; - } - - ret = zmq_connect(s->socket, uri); - if (ret == -1) { -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_connect(): %s\n", ZMQ_STRERROR); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_connect(): %s\n", ZMQ_STRERROR); - goto fail_close; - } - } -@@ -150,7 +150,7 @@ static int zmq_proto_write(URLContext *h, const unsigned char *buf, int size) - return ret; - ret = zmq_send(s->socket, buf, size, 0); - if (ret == -1) { -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_send(): %s\n", ZMQ_STRERROR); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_send(): %s\n", ZMQ_STRERROR); - return AVERROR_EXTERNAL; - } - return ret; /*number of bytes sent*/ -@@ -166,7 +166,7 @@ static int zmq_proto_read(URLContext *h, unsigned char *buf, int size) - return ret; - ret = zmq_recv(s->socket, buf, size, 0); - if (ret == -1) { -- av_log(h, AV_LOG_ERROR, "Error occurred during zmq_recv(): %s\n", ZMQ_STRERROR); -+ av_log(h, AV_LOG_ERROR, "Error occured during zmq_recv(): %s\n", ZMQ_STRERROR); - return AVERROR_EXTERNAL; - } - if (ret > size) { diff --git a/libavformat/matroskadec.c b/libavformat/matroskadec.c -index c47518b73a..fb1849f9c3 100644 +index 3551087b3e..c47518b73a 100644 --- a/libavformat/matroskadec.c +++ b/libavformat/matroskadec.c -@@ -1690,7 +1690,7 @@ static int matroska_decode_buffer(uint8_t **buf, int *buf_size, - case MATROSKA_TRACK_ENCODING_COMP_ZLIB: - { - z_stream zstream = { 0 }; -- if (!pkt_size || inflateInit(&zstream) != Z_OK) -+ if (inflateInit(&zstream) != Z_OK) - return -1; - zstream.next_in = data; - zstream.avail_in = isize; -@@ -1723,7 
+1723,7 @@ static int matroska_decode_buffer(uint8_t **buf, int *buf_size, - case MATROSKA_TRACK_ENCODING_COMP_BZLIB: - { - bz_stream bzstream = { 0 }; -- if (!pkt_size || BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK) -+ if (BZ2_bzDecompressInit(&bzstream, 0, 0) != BZ_OK) - return -1; - bzstream.next_in = data; - bzstream.avail_in = isize; -@@ -2802,14 +2802,11 @@ static int matroska_parse_tracks(AVFormatContext *s) - mkv_stereo_mode_display_mul(track->video.stereo_mode, &display_width_mul, &display_height_mul); - - if (track->video.display_unit < MATROSKA_VIDEO_DISPLAYUNIT_UNKNOWN) { -- if (track->video.display_width && track->video.display_height && -- st->codecpar->height < INT64_MAX / track->video.display_width / display_width_mul && -- st->codecpar->width < INT64_MAX / track->video.display_height / display_height_mul) -- av_reduce(&st->sample_aspect_ratio.num, -- &st->sample_aspect_ratio.den, -- st->codecpar->height * track->video.display_width * display_width_mul, -- st->codecpar->width * track->video.display_height * display_height_mul, -- INT_MAX); -+ av_reduce(&st->sample_aspect_ratio.num, -+ &st->sample_aspect_ratio.den, -+ st->codecpar->height * track->video.display_width * display_width_mul, -+ st->codecpar->width * track->video.display_height * display_height_mul, -+ 255); - } - if (st->codecpar->codec_id != AV_CODEC_ID_HEVC) - st->need_parsing = AVSTREAM_PARSE_HEADERS; -@@ -2978,8 +2975,6 @@ static int matroska_read_header(AVFormatContext *s) - - if (!matroska->time_scale) - matroska->time_scale = 1000000; -- if (isnan(matroska->duration)) -- matroska->duration = 0; - if (matroska->duration) - matroska->ctx->duration = matroska->duration * matroska->time_scale * - 1000 / AV_TIME_BASE; -@@ -3940,9 +3935,7 @@ static CueDesc get_cue_desc(AVFormatContext *s, int64_t ts, int64_t cues_start) - int i; - int nb_index_entries = s->streams[0]->nb_index_entries; - AVIndexEntry *index_entries = s->streams[0]->index_entries; +@@ -2740,10 +2740,6 @@ static int matroska_parse_tracks(AVFormatContext *s) + track->time_scale); + track->time_scale = 1.0; + } - -- if (ts >= (int64_t)(matroska->duration * matroska->time_scale)) -- return (CueDesc) {-1, -1, -1, -1}; -+ if (ts >= matroska->duration * matroska->time_scale) return (CueDesc) {-1, -1, -1, -1}; - for (i = 1; i < nb_index_entries; i++) { - if (index_entries[i - 1].timestamp * matroska->time_scale <= ts && - index_entries[i].timestamp * matroska->time_scale > ts) { -@@ -4131,8 +4124,6 @@ static int64_t webm_dash_manifest_compute_bandwidth(AVFormatContext *s, int64_t - // prebuffered. 
- pre_bytes = desc_end.end_offset - desc_end.start_offset; - pre_ns = desc_end.end_time_ns - desc_end.start_time_ns; -- if (pre_ns <= 0) -- return -1; - pre_sec = pre_ns / nano_seconds_per_second; - prebuffer_bytes += - pre_bytes * ((temp_prebuffer_ns / nano_seconds_per_second) / pre_sec); -@@ -4144,16 +4135,12 @@ static int64_t webm_dash_manifest_compute_bandwidth(AVFormatContext *s, int64_t - do { - int64_t desc_bytes = desc_end.end_offset - desc_beg.start_offset; - int64_t desc_ns = desc_end.end_time_ns - desc_beg.start_time_ns; -- double desc_sec, calc_bits_per_second, percent, mod_bits_per_second; -- if (desc_bytes <= 0) -- return -1; +- if (matroska->time_scale * track->time_scale > UINT_MAX) +- return AVERROR_INVALIDDATA; - -- desc_sec = desc_ns / nano_seconds_per_second; -- calc_bits_per_second = (desc_bytes * 8) / desc_sec; -+ double desc_sec = desc_ns / nano_seconds_per_second; -+ double calc_bits_per_second = (desc_bytes * 8) / desc_sec; + avpriv_set_pts_info(st, 64, matroska->time_scale * track->time_scale, + 1000 * 1000 * 1000); /* 64 bit pts in ns */ - // Drop the bps by the percentage of bytes buffered. -- percent = (desc_bytes - prebuffer_bytes) / desc_bytes; -- mod_bits_per_second = calc_bits_per_second * percent; -+ double percent = (desc_bytes - prebuffer_bytes) / desc_bytes; -+ double mod_bits_per_second = calc_bits_per_second * percent; +@@ -3776,7 +3772,7 @@ static int matroska_parse_cluster(MatroskaDemuxContext *matroska) + MatroskaBlock *block = &cluster->block; + int res; - if (prebuffer < desc_sec) { - double search_sec = +- av_assert0(matroska->num_levels <= 2U); ++ av_assert0(matroska->num_levels <= 2); + + if (matroska->num_levels == 1) { + res = ebml_parse(matroska, matroska_segment, NULL); +@@ -4107,19 +4103,16 @@ static int64_t webm_dash_manifest_compute_bandwidth(AVFormatContext *s, int64_t + int64_t prebuffer_ns = 1000000000; + int64_t time_ns = st->index_entries[i].timestamp * matroska->time_scale; + double nano_seconds_per_second = 1000000000.0; +- int64_t prebuffered_ns; ++ int64_t prebuffered_ns = time_ns + prebuffer_ns; + double prebuffer_bytes = 0.0; + int64_t temp_prebuffer_ns = prebuffer_ns; + int64_t pre_bytes, pre_ns; + double pre_sec, prebuffer, bits_per_second; + CueDesc desc_beg = get_cue_desc(s, time_ns, cues_start); ++ + // Start with the first Cue. + CueDesc desc_end = desc_beg; + +- if (time_ns > INT64_MAX - prebuffer_ns) +- return -1; +- prebuffered_ns = time_ns + prebuffer_ns; +- + // Figure out how much data we have downloaded for the prebuffer. This will + // be used later to adjust the bits per sample to try. 
+ while (desc_end.start_time_ns != -1 && desc_end.end_time_ns < prebuffered_ns) { diff --git a/libavformat/matroskaenc.c b/libavformat/matroskaenc.c index b4284a8778..692265593c 100644 --- a/libavformat/matroskaenc.c @@ -70105,92 +70242,73 @@ index b4284a8778..692265593c 100644 ffio_free_dyn_buf(&dyn_cp); ret = ff_alloc_extradata(par, side_data_size); if (ret < 0) -diff --git a/libavformat/moflex.c b/libavformat/moflex.c -index ca40b51c3e..0706f88e64 100644 ---- a/libavformat/moflex.c -+++ b/libavformat/moflex.c -@@ -172,7 +172,7 @@ static int moflex_read_sync(AVFormatContext *s) - unsigned type, ssize, codec_id = 0; - unsigned codec_type, width = 0, height = 0, sample_rate = 0, channels = 0; - int stream_index = -1; -- AVRational tb = av_make_q(0, 1); -+ AVRational fps; +diff --git a/libavformat/mm.c b/libavformat/mm.c +index 097c3efc1c..02ffbcd824 100644 +--- a/libavformat/mm.c ++++ b/libavformat/mm.c +@@ -94,7 +94,7 @@ static int read_header(AVFormatContext *s) + type = avio_rl16(pb); + length = avio_rl32(pb); - read_var_byte(s, &type); - read_var_byte(s, &ssize); -@@ -195,7 +195,6 @@ static int moflex_read_sync(AVFormatContext *s) - return AVERROR_PATCHWELCOME; - } - sample_rate = avio_rb24(pb) + 1; -- tb = av_make_q(1, sample_rate); - channels = avio_r8(pb) + 1; - break; - case 1: -@@ -209,8 +208,8 @@ static int moflex_read_sync(AVFormatContext *s) - av_log(s, AV_LOG_ERROR, "Unsupported video codec: %d\n", codec_id); - return AVERROR_PATCHWELCOME; - } -- tb.den = avio_rb16(pb); -- tb.num = avio_rb16(pb); -+ fps.num = avio_rb16(pb); -+ fps.den = avio_rb16(pb); - width = avio_rb16(pb); - height = avio_rb16(pb); - avio_skip(pb, type == 3 ? 3 : 2); -@@ -238,8 +237,10 @@ static int moflex_read_sync(AVFormatContext *s) - if (!st->priv_data) - return AVERROR(ENOMEM); - -- if (tb.num) -- avpriv_set_pts_info(st, 63, tb.num, tb.den); -+ if (sample_rate) -+ avpriv_set_pts_info(st, 63, 1, sample_rate); -+ else -+ avpriv_set_pts_info(st, 63, fps.den, fps.num); - } - } +- if (type != MM_TYPE_HEADER || length < 10) ++ if (type != MM_TYPE_HEADER) + return AVERROR_INVALIDDATA; + /* read header */ diff --git a/libavformat/mov.c b/libavformat/mov.c -index 295d9826de..4af796ee31 100644 +index 6ec6ebf0a2..46d8e628fd 100644 --- a/libavformat/mov.c +++ b/libavformat/mov.c -@@ -607,13 +607,11 @@ static int mov_read_dref(MOVContext *c, AVIOContext *pb, MOVAtom atom) - for (i = 0; i < entries; i++) { - MOVDref *dref = &sc->drefs[i]; - uint32_t size = avio_rb32(pb); -- int64_t next = avio_tell(pb); -+ int64_t next = avio_tell(pb) + size - 4; - -- if (size < 12 || next < 0 || next > INT64_MAX - size) -+ if (size < 12) - return AVERROR_INVALIDDATA; - -- next += size - 4; -- - dref->type = avio_rl32(pb); - avio_rb32(pb); // version + flags - -@@ -1944,8 +1942,6 @@ static int mov_read_glbl(MOVContext *c, AVIOContext *pb, MOVAtom atom) - // wrap a whole fiel atom inside of a glbl atom. 
- unsigned size = avio_rb32(pb); - unsigned type = avio_rl32(pb); -- if (avio_feof(pb)) +@@ -306,8 +306,7 @@ static int mov_read_udta_string(MOVContext *c, AVIOContext *pb, MOVAtom atom) + char *str = NULL; + const char *key = NULL; + uint16_t langcode = 0; +- uint32_t data_type = 0, str_size_alloc; +- uint64_t str_size; ++ uint32_t data_type = 0, str_size, str_size_alloc; + int (*parse)(MOVContext*, AVIOContext*, unsigned, const char*) = NULL; + int raw = 0; + int num = 0; +@@ -1135,12 +1134,6 @@ static int mov_read_ftyp(MOVContext *c, AVIOContext *pb, MOVAtom atom) + int ret = ffio_read_size(pb, type, 4); + if (ret < 0) + return ret; +- if (c->fc->nb_streams) { +- if (c->fc->strict_std_compliance >= FF_COMPLIANCE_STRICT) - return AVERROR_INVALIDDATA; - avio_seek(pb, -8, SEEK_CUR); - if (type == MKTAG('f','i','e','l') && size == atom.size) - return mov_read_default(c, pb, atom); -@@ -2555,10 +2551,6 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries) - av_log(c->fc, AV_LOG_ERROR, "Invalid sample rate %d\n", st->codecpar->sample_rate); - return AVERROR_INVALIDDATA; - } -- if (st->codecpar->channels < 0) { -- av_log(c->fc, AV_LOG_ERROR, "Invalid channels %d\n", st->codecpar->channels); -- return AVERROR_INVALIDDATA; +- av_log(c->fc, AV_LOG_DEBUG, "Ignoring duplicate FTYP\n"); +- return 0; +- } + + if (strcmp(type, "qt ")) + c->isom = 1; +@@ -2065,13 +2058,8 @@ static int mov_read_stco(MOVContext *c, AVIOContext *pb, MOVAtom atom) + for (i = 0; i < entries && !pb->eof_reached; i++) + sc->chunk_offsets[i] = avio_rb32(pb); + else if (atom.type == MKTAG('c','o','6','4')) +- for (i = 0; i < entries && !pb->eof_reached; i++) { ++ for (i = 0; i < entries && !pb->eof_reached; i++) + sc->chunk_offsets[i] = avio_rb64(pb); +- if (sc->chunk_offsets[i] < 0) { +- av_log(c->fc, AV_LOG_WARNING, "Impossible chunk_offset\n"); +- sc->chunk_offsets[i] = 0; - } - } else if (st->codecpar->codec_type==AVMEDIA_TYPE_SUBTITLE){ - mov_parse_stsd_subtitle(c, pb, st, sc, - size - (avio_tell(pb) - start_pos)); -@@ -3963,13 +3955,6 @@ static void mov_build_index(MOVContext *mov, AVStream *st) +- } + else + return AVERROR_INVALIDDATA; + +@@ -3202,10 +3190,6 @@ static int get_edit_list_entry(MOVContext *mov, + } + *edit_list_duration = av_rescale(*edit_list_duration, msc->time_scale, + global_timescale); +- +- if (*edit_list_duration + (uint64_t)*edit_list_media_time > INT64_MAX) +- *edit_list_duration = 0; +- + return 1; + } + +@@ -3979,13 +3963,6 @@ static void mov_build_index(MOVContext *mov, AVStream *st) if (keyframe) distance = 0; sample_size = sc->stsz_sample_size > 0 ? 
sc->stsz_sample_size : sc->sample_sizes[current_sample]; @@ -70204,57 +70322,95 @@ index 295d9826de..4af796ee31 100644 if (sc->pseudo_stream_id == -1 || sc->stsc_data[stsc_index].id - 1 == sc->pseudo_stream_id) { AVIndexEntry *e; -@@ -5131,8 +5116,6 @@ static int mov_read_sidx(MOVContext *c, AVIOContext *pb, MOVAtom atom) - avio_rb16(pb); // reserved - - item_count = avio_rb16(pb); -- if (item_count == 0) -- return AVERROR_INVALIDDATA; - - for (i = 0; i < item_count; i++) { - int index; -@@ -5458,9 +5441,6 @@ static int mov_read_smdm(MOVContext *c, AVIOContext *pb, MOVAtom atom) +@@ -4431,13 +4408,12 @@ static int mov_read_keys(MOVContext *c, AVIOContext *pb, MOVAtom atom) + for (i = 1; i <= count; ++i) { + uint32_t key_size = avio_rb32(pb); + uint32_t type = avio_rl32(pb); +- if (key_size < 8 || key_size > atom.size) { ++ if (key_size < 8) { + av_log(c->fc, AV_LOG_ERROR, + "The key# %"PRIu32" in meta has invalid size:" + "%"PRIu32"\n", i, key_size); + return AVERROR_INVALIDDATA; + } +- atom.size -= key_size; + key_size -= 8; + if (type != MKTAG('m','d','t','a')) { + avio_skip(pb, key_size); +@@ -5475,10 +5451,8 @@ static int mov_read_smdm(MOVContext *c, AVIOContext *pb, MOVAtom atom) av_log(c->fc, AV_LOG_WARNING, "Unsupported Mastering Display Metadata box version %d\n", version); return 0; } -- if (sc->mastering) -- return AVERROR_INVALIDDATA; -- +- if (sc->mastering) { +- av_log(c->fc, AV_LOG_WARNING, "Ignoring duplicate Mastering Display Metadata\n"); +- return 0; +- } ++ if (sc->mastering) ++ return AVERROR_INVALIDDATA; + avio_skip(pb, 3); /* flags */ - sc->mastering = av_mastering_display_metadata_alloc(); -@@ -6149,8 +6129,6 @@ static int mov_read_senc(MOVContext *c, AVIOContext *pb, MOVAtom atom) - } - if (pb->eof_reached) { - av_log(c->fc, AV_LOG_ERROR, "Hit EOF while reading senc\n"); -- if (ret >= 0) -- av_encryption_info_free(encryption_index->encrypted_samples[i]); - ret = AVERROR_INVALIDDATA; - } +@@ -5515,16 +5489,11 @@ static int mov_read_mdcv(MOVContext *c, AVIOContext *pb, MOVAtom atom) -@@ -7089,8 +7067,6 @@ static int mov_read_default(MOVContext *c, AVIOContext *pb, MOVAtom atom) - if (a.size == 0) { - a.size = atom.size - total_size + 8; - } -- if (a.size < 0) -- break; - a.size -= 8; - if (a.size < 0) - break; + sc = c->fc->streams[c->fc->nb_streams - 1]->priv_data; + +- if (atom.size < 24) { ++ if (atom.size < 24 || sc->mastering) { + av_log(c->fc, AV_LOG_ERROR, "Invalid Mastering Display Color Volume box\n"); + return AVERROR_INVALIDDATA; + } + +- if (sc->mastering) { +- av_log(c->fc, AV_LOG_WARNING, "Ignoring duplicate Mastering Display Color Volume\n"); +- return 0; +- } +- + sc->mastering = av_mastering_display_metadata_alloc(); + if (!sc->mastering) + return AVERROR(ENOMEM); +@@ -7886,13 +7855,12 @@ static AVIndexEntry *mov_find_next_sample(AVFormatContext *s, AVStream **st) + if (msc->pb && msc->current_sample < avst->nb_index_entries) { + AVIndexEntry *current_sample = &avst->index_entries[msc->current_sample]; + int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale); +- uint64_t dtsdiff = best_dts > dts ? 
best_dts - (uint64_t)dts : ((uint64_t)dts - best_dts); + av_log(s, AV_LOG_TRACE, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts); + if (!sample || (!(s->pb->seekable & AVIO_SEEKABLE_NORMAL) && current_sample->pos < sample->pos) || + ((s->pb->seekable & AVIO_SEEKABLE_NORMAL) && + ((msc->pb != s->pb && dts < best_dts) || (msc->pb == s->pb && dts != AV_NOPTS_VALUE && +- ((dtsdiff <= AV_TIME_BASE && current_sample->pos < sample->pos) || +- (dtsdiff > AV_TIME_BASE && dts < best_dts)))))) { ++ ((FFABS(best_dts - dts) <= AV_TIME_BASE && current_sample->pos < sample->pos) || ++ (FFABS(best_dts - dts) > AV_TIME_BASE && dts < best_dts)))))) { + sample = current_sample; + best_dts = dts; + *st = avst; +@@ -8079,7 +8047,7 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt) + pkt->flags |= AV_PKT_FLAG_DISCARD; + } + if (sc->ctts_data && sc->ctts_index < sc->ctts_count) { +- pkt->pts = av_sat_add64(pkt->dts, av_sat_add64(sc->dts_shift, sc->ctts_data[sc->ctts_index].duration)); ++ pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration; + /* update ctts context */ + sc->ctts_sample++; + if (sc->ctts_index < sc->ctts_count && diff --git a/libavformat/movenc.c b/libavformat/movenc.c -index 8a06de2fd2..0cbbc094de 100644 +index b7914e5e56..9f83c4da2a 100644 --- a/libavformat/movenc.c +++ b/libavformat/movenc.c -@@ -91,7 +91,7 @@ static const AVOption options[] = { - { "frag_duration", "Maximum fragment duration", offsetof(MOVMuxContext, max_fragment_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, - { "min_frag_duration", "Minimum fragment duration", offsetof(MOVMuxContext, min_fragment_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, - { "frag_size", "Maximum fragment size", offsetof(MOVMuxContext, max_fragment_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, -- { "ism_lookahead", "Number of lookahead entries for ISM files", offsetof(MOVMuxContext, ism_lookahead), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 255, AV_OPT_FLAG_ENCODING_PARAM}, -+ { "ism_lookahead", "Number of lookahead entries for ISM files", offsetof(MOVMuxContext, ism_lookahead), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, - { "video_track_timescale", "set timescale of all video tracks", offsetof(MOVMuxContext, video_track_timescale), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM}, - { "brand", "Override major brand", offsetof(MOVMuxContext, major_brand), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = AV_OPT_FLAG_ENCODING_PARAM }, - { "use_editlist", "use edit list", offsetof(MOVMuxContext, use_editlist), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, AV_OPT_FLAG_ENCODING_PARAM}, -@@ -5926,6 +5926,7 @@ static int mov_write_single_packet(AVFormatContext *s, AVPacket *pkt) +@@ -5581,12 +5581,6 @@ int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt) + if (ret < 0) + return ret; + +- if (pkt->pts != AV_NOPTS_VALUE && +- (uint64_t)pkt->dts - pkt->pts != (int32_t)((uint64_t)pkt->dts - pkt->pts)) { +- av_log(s, AV_LOG_WARNING, "pts/dts pair unsupported\n"); +- return AVERROR_PATCHWELCOME; +- } +- + if (mov->flags & FF_MOV_FLAG_FRAGMENT) { + int ret; + if (mov->moov_written || mov->flags & FF_MOV_FLAG_EMPTY_MOOV) { +@@ -5932,6 +5926,7 @@ static int mov_write_single_packet(AVFormatContext *s, AVPacket *pkt) if (trk->par->codec_id == AV_CODEC_ID_MP4ALS || trk->par->codec_id == AV_CODEC_ID_AAC || trk->par->codec_id == AV_CODEC_ID_AV1 || @@ -70262,95 +70418,171 @@ index 
8a06de2fd2..0cbbc094de 100644 trk->par->codec_id == AV_CODEC_ID_FLAC) { buffer_size_t side_size; uint8_t *side = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size); +diff --git a/libavformat/mp3dec.c b/libavformat/mp3dec.c +index e3cafc30ce..5e7f273c6a 100644 +--- a/libavformat/mp3dec.c ++++ b/libavformat/mp3dec.c +@@ -137,10 +137,9 @@ static void read_xing_toc(AVFormatContext *s, int64_t filesize, int64_t duration + int fill_index = (mp3->usetoc || fast_seek) && duration > 0; + + if (!filesize && +- (filesize = avio_size(s->pb)) <= 0) { ++ !(filesize = avio_size(s->pb))) { + av_log(s, AV_LOG_WARNING, "Cannot determine file size, skipping TOC table.\n"); + fill_index = 0; +- filesize = 0; + } + + for (i = 0; i < XING_TOC_COUNT; i++) { +@@ -585,7 +584,7 @@ static int mp3_seek(AVFormatContext *s, int stream_index, int64_t timestamp, + if (best_pos < 0) + return best_pos; + +- if (mp3->is_cbr && ie == &ie1 && mp3->frames && mp3->header_filesize > 0) { ++ if (mp3->is_cbr && ie == &ie1 && mp3->frames) { + int frame_duration = av_rescale(st->duration, 1, mp3->frames); + ie1.timestamp = frame_duration * av_rescale(best_pos - s->internal->data_offset, mp3->frames, mp3->header_filesize); + } +diff --git a/libavformat/mpeg.c b/libavformat/mpeg.c +index ad1f3edc47..79610ec600 100644 +--- a/libavformat/mpeg.c ++++ b/libavformat/mpeg.c +@@ -71,9 +71,6 @@ static int mpegps_probe(const AVProbeData *p) + int pes = endpes <= i && check_pes(p->buf + i, p->buf + p->buf_size); + int pack = check_pack_header(p->buf + i); + +- if (len > INT_MAX - i) +- break; +- + if (code == SYSTEM_HEADER_START_CODE) + sys++; + else if (code == PACK_START_CODE && pack) +diff --git a/libavformat/mpegts.c b/libavformat/mpegts.c +index 198f377a90..a3033134f7 100644 +--- a/libavformat/mpegts.c ++++ b/libavformat/mpegts.c +@@ -2556,8 +2556,7 @@ static void pat_cb(MpegTSFilter *filter, const uint8_t *section, int section_len + FFSWAP(struct Program, ts->prg[nb_prg], ts->prg[prg_idx]); + if (prg_idx >= nb_prg) + nb_prg++; +- } else +- nb_prg = 0; ++ } + } + } + ts->nb_prg = nb_prg; diff --git a/libavformat/mxfdec.c b/libavformat/mxfdec.c -index 32c2464fb1..50174fcd5f 100644 +index 1b68b01908..d37eeb603a 100644 --- a/libavformat/mxfdec.c +++ b/libavformat/mxfdec.c -@@ -60,7 +60,6 @@ - #include "mxf.h" - - #define MXF_MAX_CHUNK_SIZE (32 << 20) --#define RUN_IN_MAX (65535+1) // S377m-2004 section 5.5 and S377-1-2009 section 6.5, the +1 is to be slightly more tolerant - - typedef enum { - Header, -@@ -876,27 +875,15 @@ static int mxf_read_cryptographic_context(void *arg, AVIOContext *pb, int tag, i - - static int mxf_read_strong_ref_array(AVIOContext *pb, UID **refs, int *count) - { -- int64_t ret; -- unsigned c = avio_rb32(pb); -- -- //avio_read() used int -- if (c > INT_MAX / sizeof(UID)) -- return AVERROR_PATCHWELCOME; -- *count = c; -- -+ *count = avio_rb32(pb); - av_free(*refs); -- *refs = av_malloc_array(*count, sizeof(UID)); -+ *refs = av_calloc(*count, sizeof(UID)); - if (!*refs) { - *count = 0; - return AVERROR(ENOMEM); - } - avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */ -- ret = avio_read(pb, (uint8_t *)*refs, *count * sizeof(UID)); -- if (ret != *count * sizeof(UID)) { -- *count = ret < 0 ? 0 : ret / sizeof(UID); -- return ret < 0 ? 
ret : AVERROR_INVALIDDATA; -- } -- -+ avio_read(pb, (uint8_t *)*refs, *count * sizeof(UID)); - return 0; +@@ -224,7 +224,7 @@ typedef struct MXFDescriptor { + typedef struct MXFIndexTableSegment { + UID uid; + enum MXFMetadataSetType type; +- unsigned edit_unit_byte_count; ++ int edit_unit_byte_count; + int index_sid; + int body_sid; + AVRational index_edit_rate; +@@ -420,15 +420,12 @@ static int mxf_read_sync(AVIOContext *pb, const uint8_t *key, unsigned size) + return i == size; } -@@ -1105,9 +1092,6 @@ static int mxf_read_index_entry_array(AVIOContext *pb, MXFIndexTableSegment *seg +-static int klv_read_packet(MXFContext *mxf, KLVPacket *klv, AVIOContext *pb) ++static int klv_read_packet(KLVPacket *klv, AVIOContext *pb) { - int i, length; - -- if (segment->temporal_offset_entries) + int64_t length, pos; + if (!mxf_read_sync(pb, mxf_klv_key, 4)) + return AVERROR_INVALIDDATA; + klv->offset = avio_tell(pb) - 4; +- if (klv->offset < mxf->run_in) - return AVERROR_INVALIDDATA; - - segment->nb_index_entries = avio_rb32(pb); - - length = avio_rb32(pb); -@@ -2269,12 +2253,12 @@ static enum AVColorRange mxf_get_color_range(MXFContext *mxf, MXFDescriptor *des - /* CDCI range metadata */ - if (!descriptor->component_depth) - return AVCOL_RANGE_UNSPECIFIED; -- if (descriptor->black_ref_level == 0 && descriptor->component_depth < 31 && -+ if (descriptor->black_ref_level == 0 && - descriptor->white_ref_level == ((1<component_depth) - 1) && - (descriptor->color_range == (1<component_depth) || - descriptor->color_range == ((1<component_depth) - 1))) - return AVCOL_RANGE_JPEG; -- if (descriptor->component_depth >= 8 && descriptor->component_depth < 31 && -+ if (descriptor->component_depth >= 8 && - descriptor->black_ref_level == (1 <<(descriptor->component_depth - 4)) && - descriptor->white_ref_level == (235<<(descriptor->component_depth - 8)) && - descriptor->color_range == ((14<<(descriptor->component_depth - 4)) + 1)) -@@ -3358,7 +3342,6 @@ static int mxf_read_header(AVFormatContext *s) - KLVPacket klv; - int64_t essence_offset = 0; - int ret; -- int64_t run_in; - - mxf->last_forward_tell = INT64_MAX; - -@@ -3369,10 +3352,7 @@ static int mxf_read_header(AVFormatContext *s) - } - avio_seek(s->pb, -14, SEEK_CUR); - mxf->fc = s; -- run_in = avio_tell(s->pb); -- if (run_in < 0 || run_in > RUN_IN_MAX) + memcpy(klv->key, mxf_klv_key, 4); + avio_read(pb, klv->key + 4, 12); + length = klv_decode_ber_length(pb); +@@ -744,9 +741,6 @@ static int mxf_read_partition_pack(void *arg, AVIOContext *pb, int tag, int size + partition->index_sid = avio_rb32(pb); + partition->body_offset = avio_rb64(pb); + partition->body_sid = avio_rb32(pb); +- if (partition->body_offset < 0) - return AVERROR_INVALIDDATA; -- mxf->run_in = run_in; -+ mxf->run_in = avio_tell(s->pb); +- + if (avio_read(pb, op, sizeof(UID)) != sizeof(UID)) { + av_log(mxf->fc, AV_LOG_ERROR, "Failed reading UID\n"); + return AVERROR_INVALIDDATA; +@@ -1162,9 +1156,6 @@ static int mxf_read_index_table_segment(void *arg, AVIOContext *pb, int tag, int + case 0x3F0B: + segment->index_edit_rate.num = avio_rb32(pb); + segment->index_edit_rate.den = avio_rb32(pb); +- if (segment->index_edit_rate.num <= 0 || +- segment->index_edit_rate.den <= 0) +- return AVERROR_INVALIDDATA; + av_log(NULL, AV_LOG_TRACE, "IndexEditRate %d/%d\n", segment->index_edit_rate.num, + segment->index_edit_rate.den); + break; +@@ -1697,13 +1688,9 @@ static int mxf_edit_unit_absolute_offset(MXFContext *mxf, MXFIndexTable *index_t + if (edit_unit < s->index_start_position + s->index_duration) { + 
int64_t index = edit_unit - s->index_start_position; - mxf_read_random_index_pack(s); +- if (s->edit_unit_byte_count) { +- if (index > INT64_MAX / s->edit_unit_byte_count || +- s->edit_unit_byte_count * index > INT64_MAX - offset_temp) +- return AVERROR_INVALIDDATA; +- ++ if (s->edit_unit_byte_count) + offset_temp += s->edit_unit_byte_count * index; +- } else { ++ else { + if (s->nb_index_entries == 2 * s->index_duration + 1) + index *= 2; /* Avid index */ -@@ -3516,8 +3496,8 @@ static int64_t mxf_compute_sample_count(MXFContext *mxf, AVStream *st, +@@ -2705,7 +2692,6 @@ static int mxf_parse_structural_metadata(MXFContext *mxf) + if (container_ul->desc) + av_dict_set(&st->metadata, "data_type", container_ul->desc, 0); + if (mxf->eia608_extract && +- container_ul->desc && + !strcmp(container_ul->desc, "vbi_vanc_smpte_436M")) { + st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE; + st->codecpar->codec_id = AV_CODEC_ID_EIA_608; +@@ -3063,7 +3049,7 @@ static int mxf_seek_to_previous_partition(MXFContext *mxf) + /* Make sure this is actually a PartitionPack, and if so parse it. + * See deadlock2.mxf + */ +- if ((ret = klv_read_packet(mxf, &klv, pb)) < 0) { ++ if ((ret = klv_read_packet(&klv, pb)) < 0) { + av_log(mxf->fc, AV_LOG_ERROR, "failed to read PartitionPack KLV\n"); + return ret; + } +@@ -3345,7 +3331,7 @@ static void mxf_read_random_index_pack(AVFormatContext *s) + if (length < min_rip_length || length > max_rip_length) + goto end; + avio_seek(s->pb, file_size - length, SEEK_SET); +- if (klv_read_packet(mxf, &klv, s->pb) < 0 || ++ if (klv_read_packet(&klv, s->pb) < 0 || + !IS_KLV_KEY(klv.key, ff_mxf_random_index_pack_key)) + goto end; + if (klv.next_klv != file_size || klv.length <= 4 || (klv.length - 4) % 12) { +@@ -3393,7 +3379,7 @@ static int mxf_read_header(AVFormatContext *s) + while (!avio_feof(s->pb)) { + const MXFMetadataReadTableEntry *metadata; + +- if (klv_read_packet(mxf, &klv, s->pb) < 0) { ++ if (klv_read_packet(&klv, s->pb) < 0) { + /* EOF - seek to previous partition or stop */ + if(mxf_parse_handle_partition_or_eof(mxf) <= 0) + break; +@@ -3500,7 +3486,8 @@ static int mxf_get_next_track_edit_unit(MXFContext *mxf, MXFTrack *track, int64_ + + a = -1; + b = track->original_duration; +- while (b - 1 > a) { ++ ++ while (b - a > 1) { + m = (a + b) >> 1; + if (mxf_edit_unit_absolute_offset(mxf, t, m, track->edit_rate, NULL, &offset, NULL, 0) < 0) + return -1; +@@ -3529,8 +3516,8 @@ static int64_t mxf_compute_sample_count(MXFContext *mxf, AVStream *st, if ((sample_rate.num / sample_rate.den) == 48000) { return av_rescale_q(edit_unit, sample_rate, track->edit_rate); } else { @@ -70361,41 +70593,33 @@ index 32c2464fb1..50174fcd5f 100644 if (remainder) av_log(mxf->fc, AV_LOG_WARNING, "seeking detected on stream #%d with time base (%d/%d) and " -@@ -3785,7 +3765,7 @@ static int mxf_read_close(AVFormatContext *s) +@@ -3648,7 +3635,7 @@ static int mxf_read_packet(AVFormatContext *s, AVPacket *pkt) - static int mxf_probe(const AVProbeData *p) { - const uint8_t *bufp = p->buf; -- const uint8_t *end = p->buf + FFMIN(p->buf_size, RUN_IN_MAX + 1 + sizeof(mxf_header_partition_pack_key)); -+ const uint8_t *end = p->buf + p->buf_size; - - if (p->buf_size < sizeof(mxf_header_partition_pack_key)) - return 0; + if (pos < mxf->current_klv_data.next_klv - mxf->current_klv_data.length || pos >= mxf->current_klv_data.next_klv) { + mxf->current_klv_data = (KLVPacket){{0}}; +- ret = klv_read_packet(mxf, &klv, s->pb); ++ ret = klv_read_packet(&klv, s->pb); + if (ret < 0) + break; + max_data_size = 
klv.length; +diff --git a/libavformat/nsvdec.c b/libavformat/nsvdec.c +index d57a644c67..eb26b29450 100644 +--- a/libavformat/nsvdec.c ++++ b/libavformat/nsvdec.c +@@ -603,7 +603,7 @@ null_chunk_retry: + pkt = &nsv->ahead[NSV_ST_AUDIO]; + /* read raw audio specific header on the first audio chunk... */ + /* on ALL audio chunks ?? seems so! */ +- if (asize >= 4 && st[NSV_ST_AUDIO]->codecpar->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) { ++ if (asize && st[NSV_ST_AUDIO]->codecpar->codec_tag == MKTAG('P', 'C', 'M', ' ')/* && fill_header*/) { + uint8_t bps; + uint8_t channels; + uint16_t samplerate; diff --git a/libavformat/nutdec.c b/libavformat/nutdec.c -index 5de3ee553a..58a74612a4 100644 +index 5de3ee553a..7df84bc6d4 100644 --- a/libavformat/nutdec.c +++ b/libavformat/nutdec.c -@@ -199,8 +199,6 @@ static int decode_main_header(NUTContext *nut) - int tmp_stream, tmp_mul, tmp_pts, tmp_size, tmp_res, tmp_head_idx; - - length = get_packetheader(nut, bc, 1, MAIN_STARTCODE); -- if (length == (uint64_t)-1) -- return AVERROR_INVALIDDATA; - end = length + avio_tell(bc); - - nut->version = ffio_read_varlen(bc); -@@ -244,11 +242,6 @@ static int decode_main_header(NUTContext *nut) - for (i = 0; i < 256;) { - int tmp_flags = ffio_read_varlen(bc); - int tmp_fields = ffio_read_varlen(bc); -- if (tmp_fields < 0) { -- av_log(s, AV_LOG_ERROR, "fields %d is invalid\n", tmp_fields); -- ret = AVERROR_INVALIDDATA; -- goto fail; -- } - - if (tmp_fields > 0) - tmp_pts = get_s(bc); -@@ -358,12 +351,8 @@ static int decode_main_header(NUTContext *nut) +@@ -358,12 +358,8 @@ static int decode_main_header(NUTContext *nut) ret = AVERROR(ENOMEM); goto fail; } @@ -70410,7 +70634,7 @@ index 5de3ee553a..58a74612a4 100644 return 0; fail: -@@ -811,23 +800,19 @@ static int nut_read_header(AVFormatContext *s) +@@ -811,23 +807,19 @@ static int nut_read_header(AVFormatContext *s) NUTContext *nut = s->priv_data; AVIOContext *bc = s->pb; int64_t pos; @@ -70436,19 +70660,32 @@ index 5de3ee553a..58a74612a4 100644 /* stream headers */ pos = 0; -diff --git a/libavformat/omadec.c b/libavformat/omadec.c -index 0f1c93c0be..d31b475fd2 100644 ---- a/libavformat/omadec.c -+++ b/libavformat/omadec.c -@@ -494,7 +494,7 @@ static int oma_read_header(AVFormatContext *s) - AV_WL16(&edata[6], jsflag); // coding mode - AV_WL16(&edata[8], jsflag); // coding mode - AV_WL16(&edata[10], 1); // always 1 -- AV_WL16(&edata[12], 0); // always 0 -+ // AV_WL16(&edata[12], 0); // always 0 +diff --git a/libavformat/oggparsetheora.c b/libavformat/oggparsetheora.c +index 293d2928b5..d1064e4328 100644 +--- a/libavformat/oggparsetheora.c ++++ b/libavformat/oggparsetheora.c +@@ -196,7 +196,7 @@ static int theora_packet(AVFormatContext *s, int idx) + if(s->streams[idx]->start_time == AV_NOPTS_VALUE && os->lastpts != AV_NOPTS_VALUE) { + s->streams[idx]->start_time = os->lastpts; + if (s->streams[idx]->duration > 0) +- s->streams[idx]->duration = av_sat_sub64(s->streams[idx]->duration, s->streams[idx]->start_time); ++ s->streams[idx]->duration -= s->streams[idx]->start_time; + } + } - avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate); - break; +diff --git a/libavformat/rdt.c b/libavformat/rdt.c +index 990a828b9b..e5824f6a48 100644 +--- a/libavformat/rdt.c ++++ b/libavformat/rdt.c +@@ -204,8 +204,6 @@ ff_rdt_parse_header(const uint8_t *buf, int len, + return -1; /* not followed by a data packet */ + + pkt_len = AV_RB16(buf+3); +- if (pkt_len > len) +- return AVERROR_INVALIDDATA; + buf += pkt_len; + len -= pkt_len; + consumed += pkt_len; diff --git 
a/libavformat/replaygain.c b/libavformat/replaygain.c index 01db483257..707d3cd4f1 100644 --- a/libavformat/replaygain.c @@ -70463,438 +70700,391 @@ index 01db483257..707d3cd4f1 100644 return db * 100000 + sign * mb; diff --git a/libavformat/rmdec.c b/libavformat/rmdec.c -index c3945a9166..97378703d1 100644 +index c7d18abc0c..c3945a9166 100644 --- a/libavformat/rmdec.c +++ b/libavformat/rmdec.c -@@ -128,6 +128,10 @@ static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb, - uint32_t version; - int ret; - -+ // Duplicate tags -+ if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) -+ return AVERROR_INVALIDDATA; -+ - /* ra type header */ - version = avio_rb16(pb); /* version */ - if (version == 3) { -@@ -327,11 +331,6 @@ int ff_rm_read_mdpr_codecdata(AVFormatContext *s, AVIOContext *pb, - if (codec_data_size == 0) - return 0; - -- // Duplicate tags -- if ( st->codecpar->codec_type != AVMEDIA_TYPE_UNKNOWN -- && st->codecpar->codec_type != AVMEDIA_TYPE_DATA) -- return AVERROR_INVALIDDATA; -- - avpriv_set_pts_info(st, 64, 1, 1000); - codec_pos = avio_tell(pb); - v = avio_rb32(pb); -@@ -565,8 +564,6 @@ static int rm_read_header(AVFormatContext *s) - } - - tag_size = avio_rb32(pb); -- if (tag_size < 0) -- return AVERROR_INVALIDDATA; - avio_skip(pb, tag_size - 8); - - for(;;) { +@@ -269,9 +269,9 @@ static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb, + case DEINT_ID_INT4: + if (ast->coded_framesize > ast->audio_framesize || + sub_packet_h <= 1 || +- ast->coded_framesize * (uint64_t)sub_packet_h > (2LL + (sub_packet_h & 1)) * ast->audio_framesize) ++ ast->coded_framesize * (uint64_t)sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize) + return AVERROR_INVALIDDATA; +- if (ast->coded_framesize * (uint64_t)sub_packet_h != 2LL*ast->audio_framesize) { ++ if (ast->coded_framesize * (uint64_t)sub_packet_h != 2*ast->audio_framesize) { + avpriv_request_sample(s, "mismatching interleaver parameters"); + return AVERROR_INVALIDDATA; + } diff --git a/libavformat/rpl.c b/libavformat/rpl.c -index 10cde679f8..ad3659e936 100644 +index ac82940b7a..10cde679f8 100644 --- a/libavformat/rpl.c +++ b/libavformat/rpl.c -@@ -276,7 +276,7 @@ static int rpl_read_header(AVFormatContext *s) - error |= read_line(pb, line, sizeof(line)); // size of "helpful" sprite - if (vst) { - error |= read_line(pb, line, sizeof(line)); // offset to key frame list -- vst->duration = number_of_chunks * (int64_t)rpl->frames_per_chunk; -+ vst->duration = number_of_chunks * rpl->frames_per_chunk; +@@ -117,7 +117,7 @@ static int rpl_read_header(AVFormatContext *s) + AVIOContext *pb = s->pb; + RPLContext *rpl = s->priv_data; + AVStream *vst = NULL, *ast = NULL; +- int64_t total_audio_size; ++ int total_audio_size; + int error = 0; + const char *endptr; + char audio_type[RPL_LINE_LENGTH]; +@@ -265,9 +265,6 @@ static int rpl_read_header(AVFormatContext *s) + "Video stream will be broken!\n", av_fourcc2str(vst->codecpar->codec_tag)); + + number_of_chunks = read_line_and_int(pb, &error); // number of chunks in the file +- if (number_of_chunks == INT_MAX) +- return AVERROR_INVALIDDATA; +- + // The number in the header is actually the index of the last chunk. 
+ number_of_chunks++; + +@@ -299,8 +296,6 @@ static int rpl_read_header(AVFormatContext *s) + if (ast) + av_add_index_entry(ast, offset + video_size, total_audio_size, + audio_size, audio_size * 8, 0); +- if (total_audio_size/8 + (uint64_t)audio_size >= INT64_MAX/8) +- return AVERROR_INVALIDDATA; + total_audio_size += audio_size * 8; } - // Read the index -diff --git a/libavformat/rtpenc.c b/libavformat/rtpenc.c -index 38e4c65c4e..5e04c1df08 100644 ---- a/libavformat/rtpenc.c -+++ b/libavformat/rtpenc.c -@@ -19,6 +19,7 @@ - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ +diff --git a/libavformat/rtmppkt.c b/libavformat/rtmppkt.c +index 886d1f4d6f..00eb0873b2 100644 +--- a/libavformat/rtmppkt.c ++++ b/libavformat/rtmppkt.c +@@ -429,6 +429,7 @@ static int amf_tag_skip(GetByteContext *gb) + { + AMFDataType type; + unsigned nb = -1; ++ int parse_key = 1; -+#include "avc.h" - #include "avformat.h" - #include "mpegts.h" - #include "internal.h" -@@ -582,8 +583,25 @@ static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt) - ff_rtp_send_vc2hq(s1, pkt->data, size, st->codecpar->field_order != AV_FIELD_PROGRESSIVE ? 1 : 0); - break; - case AV_CODEC_ID_H264: -+ { -+ uint8_t *side_data; -+ int side_data_size = 0; -+ -+ side_data = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, -+ &side_data_size); -+ -+ if (side_data_size != 0) { -+ int ps_size = side_data_size; -+ uint8_t * ps_buf = NULL; -+ -+ ff_avc_write_annexb_extradata(side_data, &ps_buf, &ps_size); -+ av_log(s1, AV_LOG_TRACE, "H264: write side data=%d\n", ps_size); -+ ff_rtp_send_h264_hevc(s1, ps_buf ? ps_buf : side_data, ps_size); -+ av_free(ps_buf); -+ } - ff_rtp_send_h264_hevc(s1, pkt->data, size); - break; -+ } - case AV_CODEC_ID_H261: - ff_rtp_send_h261(s1, pkt->data, size); - break; + if (bytestream2_get_bytes_left(gb) < 1) + return -1; +@@ -453,12 +454,13 @@ static int amf_tag_skip(GetByteContext *gb) + bytestream2_skip(gb, 10); + return 0; + case AMF_DATA_TYPE_ARRAY: ++ parse_key = 0; + case AMF_DATA_TYPE_MIXEDARRAY: + nb = bytestream2_get_be32(gb); + case AMF_DATA_TYPE_OBJECT: +- while (type != AMF_DATA_TYPE_ARRAY || nb-- > 0) { ++ while (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY) { + int t; +- if (type != AMF_DATA_TYPE_ARRAY) { ++ if (parse_key) { + int size = bytestream2_get_be16(gb); + if (!size) { + bytestream2_get_byte(gb); +diff --git a/libavformat/rtpenc_vc2hq.c b/libavformat/rtpenc_vc2hq.c +index cf548191d2..085204fa64 100644 +--- a/libavformat/rtpenc_vc2hq.c ++++ b/libavformat/rtpenc_vc2hq.c +@@ -45,7 +45,7 @@ static void send_packet(AVFormatContext *ctx, uint8_t parse_code, int info_hdr_s + ff_rtp_send_data(ctx, rtp_ctx->buf, RTP_VC2HQ_PL_HEADER_SIZE + info_hdr_size + size, rtp_m); + } + +-static int send_picture(AVFormatContext *ctx, const uint8_t *buf, int size, int interlaced) ++static void send_picture(AVFormatContext *ctx, const uint8_t *buf, int size, int interlaced) + { + RTPMuxContext *rtp_ctx = ctx->priv_data; + GetBitContext gc; +@@ -54,9 +54,6 @@ static int send_picture(AVFormatContext *ctx, const uint8_t *buf, int size, int + uint16_t frag_len; + char *info_hdr = &rtp_ctx->buf[4]; + +- if (size < DIRAC_PIC_NR_SIZE) +- return AVERROR(EINVAL); +- + pic_nr = AV_RB32(&buf[0]); + buf += DIRAC_PIC_NR_SIZE; + size -= DIRAC_PIC_NR_SIZE; +@@ -100,7 +97,6 @@ static int send_picture(AVFormatContext *ctx, const uint8_t *buf, int size, int + send_packet(ctx, DIRAC_RTP_PCODE_HQ_PIC_FRAGMENT, 16, buf, frag_len, interlaced, second_field, size > 0 ? 
0 : 1); + buf += frag_len; + } +- return 0; + } + + void ff_rtp_send_vc2hq(AVFormatContext *ctx, const uint8_t *frame_buf, int frame_size, int interlaced) +@@ -114,21 +110,16 @@ void ff_rtp_send_vc2hq(AVFormatContext *ctx, const uint8_t *frame_buf, int frame + parse_code = unit[4]; + unit_size = AV_RB32(&unit[5]); + +- if (unit_size > end - unit) +- break; +- + switch (parse_code) { + /* sequence header */ + /* end of sequence */ + case DIRAC_PCODE_SEQ_HEADER: + case DIRAC_PCODE_END_SEQ: +- if (unit_size >= DIRAC_DATA_UNIT_HEADER_SIZE) +- send_packet(ctx, parse_code, 0, unit + DIRAC_DATA_UNIT_HEADER_SIZE, unit_size - DIRAC_DATA_UNIT_HEADER_SIZE, 0, 0, 0); ++ send_packet(ctx, parse_code, 0, unit + DIRAC_DATA_UNIT_HEADER_SIZE, unit_size - DIRAC_DATA_UNIT_HEADER_SIZE, 0, 0, 0); + break; + /* HQ picture */ + case DIRAC_PCODE_PICTURE_HQ: +- if (unit_size >= DIRAC_DATA_UNIT_HEADER_SIZE) +- send_picture(ctx, unit + DIRAC_DATA_UNIT_HEADER_SIZE, unit_size - DIRAC_DATA_UNIT_HEADER_SIZE, interlaced); ++ send_picture(ctx, unit + DIRAC_DATA_UNIT_HEADER_SIZE, unit_size - DIRAC_DATA_UNIT_HEADER_SIZE, interlaced); + break; + /* parse codes without specification */ + case DIRAC_PCODE_AUX: diff --git a/libavformat/rtsp.c b/libavformat/rtsp.c -index fae3a371e0..25bdf475b3 100644 +index 2ee430a424..fae3a371e0 100644 --- a/libavformat/rtsp.c +++ b/libavformat/rtsp.c -@@ -952,8 +952,6 @@ static void rtsp_parse_transport(AVFormatContext *s, - ";,", &p); - } - th->transport = RTSP_TRANSPORT_RAW; -- } else { -- break; +@@ -412,7 +412,7 @@ static void parse_fmtp(AVFormatContext *s, RTSPState *rt, + if (rtsp_st->sdp_payload_type == payload_type && + rtsp_st->dynamic_handler && + rtsp_st->dynamic_handler->parse_sdp_a_line) { +- rtsp_st->dynamic_handler->parse_sdp_a_line(s, rtsp_st->stream_index, ++ rtsp_st->dynamic_handler->parse_sdp_a_line(s, i, + rtsp_st->dynamic_protocol_context, line); } - if (!av_strcasecmp(lower_transport, "TCP")) - th->lower_transport = RTSP_LOWER_TRANSPORT_TCP; + } +@@ -1421,7 +1421,7 @@ retry: + cur_auth_type = rt->auth_state.auth_type; + if ((ret = rtsp_send_cmd_with_content_async(s, method, url, header, + send_content, +- send_content_length)) < 0) ++ send_content_length))) + return ret; + + if ((ret = ff_rtsp_read_reply(s, reply, content_ptr, 0, method) ) < 0) +@@ -1454,8 +1454,6 @@ int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port, + char cmd[MAX_URL_SIZE]; + const char *trans_pref; + +- memset(&reply1, 0, sizeof(reply1)); +- + if (rt->transport == RTSP_TRANSPORT_RDT) + trans_pref = "x-pn-tng"; + else if (rt->transport == RTSP_TRANSPORT_RAW) +@@ -1568,11 +1566,7 @@ int ff_rtsp_make_setup_request(AVFormatContext *s, const char *host, int port, + else if (lower_transport == RTSP_LOWER_TRANSPORT_UDP_MULTICAST) { + snprintf(transport, sizeof(transport) - 1, + "%s/UDP;multicast", trans_pref); +- } else { +- err = AVERROR(EINVAL); +- goto fail; // transport would be uninitialized + } +- + if (s->oformat) { + av_strlcat(transport, ";mode=record", sizeof(transport)); + } else if (rt->server_type == RTSP_SERVER_REAL || +diff --git a/libavformat/sapdec.c b/libavformat/sapdec.c +index ef60a1272b..eec73aa2f4 100644 +--- a/libavformat/sapdec.c ++++ b/libavformat/sapdec.c +@@ -198,9 +198,6 @@ static int sap_fetch_packet(AVFormatContext *s, AVPacket *pkt) + struct pollfd p = {fd, POLLIN, 0}; + uint8_t recvbuf[RTP_MAX_PACKET_LENGTH]; + +- if (fd < 0) +- return fd; +- + if (sap->eof) + return AVERROR_EOF; + +diff --git a/libavformat/sauce.c b/libavformat/sauce.c +index 
a9ee9697a3..5ac9ca9d14 100644 +--- a/libavformat/sauce.c ++++ b/libavformat/sauce.c +@@ -34,12 +34,7 @@ int ff_sauce_read(AVFormatContext *avctx, uint64_t *fsize, int *got_width, int g + AVIOContext *pb = avctx->pb; + char buf[36]; + int datatype, filetype, t1, t2, nb_comments; +- int64_t start_pos = avio_size(pb); +- +- if (start_pos < 128) +- return AVERROR_INVALIDDATA; +- +- start_pos -= 128; ++ uint64_t start_pos = avio_size(pb) - 128; + + avio_seek(pb, start_pos, SEEK_SET); + if (avio_read(pb, buf, 7) != 7) diff --git a/libavformat/sbgdec.c b/libavformat/sbgdec.c -index c86bc40862..36cfff20fc 100644 +index a20bc13c8f..c86bc40862 100644 --- a/libavformat/sbgdec.c +++ b/libavformat/sbgdec.c -@@ -1316,8 +1316,6 @@ static int generate_intervals(void *log, struct sbg_script *s, int sample_rate, - - /* Pseudo event before the first one */ - ev0 = s->events[s->nb_events - 1]; -- if (av_sat_sub64(ev0.ts_int, period) != (uint64_t)ev0.ts_int - period) +@@ -384,7 +384,7 @@ static int parse_options(struct sbg_parser *p) + case 'L': + FORWARD_ERROR(parse_optarg(p, opt, &oarg)); + r = str_to_time(oarg.s, &p->scs.opt_duration); +- if (oarg.e != oarg.s + r || p->scs.opt_duration < 0) { ++ if (oarg.e != oarg.s + r) { + snprintf(p->err_msg, sizeof(p->err_msg), + "syntax error for option -L"); + return AVERROR_INVALIDDATA; +@@ -1288,10 +1288,7 @@ static int generate_intervals(void *log, struct sbg_script *s, int sample_rate, + /* SBaGen handles the time before and after the extremal events, + and the corresponding transitions, as if the sequence were cyclic + with a 24-hours period. */ +- period = s->events[s->nb_events - 1].ts - (uint64_t)s->events[0].ts; +- if (period < 0) - return AVERROR_INVALIDDATA; - ev0.ts_int -= period; - ev0.ts_trans -= period; - ev0.ts_next -= period; -diff --git a/libavformat/sccdec.c b/libavformat/sccdec.c -index d420f3c461..1786520944 100644 ---- a/libavformat/sccdec.c -+++ b/libavformat/sccdec.c -@@ -63,7 +63,8 @@ static int scc_read_header(AVFormatContext *s) - { - SCCContext *scc = s->priv_data; - AVStream *st = avformat_new_stream(s, NULL); -- AVPacket *sub = NULL; -+ char line2[4096], line[4096]; -+ int64_t pos, ts, next_ts = AV_NOPTS_VALUE; - ptrdiff_t len; - uint8_t out[4096]; - FFTextReader tr; -@@ -76,26 +77,47 @@ static int scc_read_header(AVFormatContext *s) - st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE; - st->codecpar->codec_id = AV_CODEC_ID_EIA_608; - -- while (1) { -+ while (!ff_text_eof(&tr) || next_ts == AV_NOPTS_VALUE || line2[0]) { - char *saveptr = NULL, *lline; - int hh, mm, ss, fs, i; -- char line[4096]; -- int64_t pos, ts; -+ AVPacket *sub; - -- len = ff_subtitles_read_line(&tr, line, sizeof(line)); -- if (len <= 13) { -- if (ff_text_eof(&tr)) -- break; -- continue; -- } -+ if (next_ts == AV_NOPTS_VALUE) { -+ while (!ff_text_eof(&tr)) { -+ len = ff_subtitles_read_line(&tr, line, sizeof(line)); -+ if (len <= 13) -+ continue; - if (!strncmp(line, "Scenarist_SCC V1.0", 18)) - continue; -- if (av_sscanf(line, "%d:%d:%d%*[:;]%d", &hh, &mm, &ss, &fs) != 4) -- continue; -+ if (av_sscanf(line, "%d:%d:%d%*[:;]%d", &hh, &mm, &ss, &fs) == 4) -+ break; -+ } -+ -+ ts = (hh * 3600LL + mm * 60LL + ss) * 1000LL + fs * 33LL; -+ -+ while (!ff_text_eof(&tr)) { -+ len = ff_subtitles_read_line(&tr, line2, sizeof(line2)); -+ if (len <= 13) -+ continue; -+ -+ if (av_sscanf(line2, "%d:%d:%d%*[:;]%d", &hh, &mm, &ss, &fs) == 4) -+ break; -+ } -+ } else { -+ memmove(line, line2, sizeof(line)); -+ line2[0] = 0; -+ -+ while (!ff_text_eof(&tr)) { -+ len = 
ff_subtitles_read_line(&tr, line2, sizeof(line2)); -+ if (len <= 13) -+ continue; -+ -+ if (av_sscanf(line2, "%d:%d:%d%*[:;]%d", &hh, &mm, &ss, &fs) == 4) -+ break; -+ } -+ } - -- ts = (hh * 3600LL + mm * 60LL + ss) * 1000LL + fs * 33LL; -- if (sub) -- sub->duration = ts - sub->pts; -+ next_ts = (hh * 3600LL + mm * 60LL + ss) * 1000LL + fs * 33LL; - - pos = ff_text_pos(&tr); - lline = (char *)&line; -@@ -146,6 +168,8 @@ static int scc_read_header(AVFormatContext *s) - - sub->pos = pos; - sub->pts = ts; -+ sub->duration = next_ts - ts; -+ ts = next_ts; - } - - ff_subtitles_queue_finalize(s, &scc->q); -diff --git a/libavformat/sctp.c b/libavformat/sctp.c -index be0cb47865..9a80e9b015 100644 ---- a/libavformat/sctp.c -+++ b/libavformat/sctp.c -@@ -282,8 +282,6 @@ fail: - goto restart; - } - fail1: -- if (fd >= 0) -- closesocket(fd); - ret = AVERROR(EIO); - freeaddrinfo(ai); - return ret; -diff --git a/libavformat/sdsdec.c b/libavformat/sdsdec.c -index 2289e1bdac..c70f5af849 100644 ---- a/libavformat/sdsdec.c -+++ b/libavformat/sdsdec.c -@@ -112,7 +112,7 @@ static int sds_read_header(AVFormatContext *ctx) - st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO; - st->codecpar->channels = 1; - st->codecpar->sample_rate = sample_period ? 1000000000 / sample_period : 16000; -- st->duration = av_rescale((avio_size(pb) - 21) / 127, s->size, 4); -+ st->duration = (avio_size(pb) - 21) / (127) * s->size / 4; - - avpriv_set_pts_info(st, 64, 1, st->codecpar->sample_rate); - -diff --git a/libavformat/spdifdec.c b/libavformat/spdifdec.c -index 03b95bd48a..1808fa9d65 100644 ---- a/libavformat/spdifdec.c -+++ b/libavformat/spdifdec.c -@@ -226,7 +226,7 @@ int ff_spdif_read_packet(AVFormatContext *s, AVPacket *pkt) - if (!s->bit_rate && s->streams[0]->codecpar->sample_rate) - /* stream bitrate matches 16-bit stereo PCM bitrate for currently - supported codecs */ -- s->bit_rate = 2 * 16LL * s->streams[0]->codecpar->sample_rate; -+ s->bit_rate = 2 * 16 * s->streams[0]->codecpar->sample_rate; - - return 0; - } -diff --git a/libavformat/subtitles.c b/libavformat/subtitles.c -index 576b2c49f1..6368ec74f9 100644 ---- a/libavformat/subtitles.c -+++ b/libavformat/subtitles.c -@@ -418,7 +418,6 @@ ptrdiff_t ff_subtitles_read_line(FFTextReader *tr, char *buf, size_t size) - size_t cur = 0; - if (!size) - return 0; -- buf[0] = '\0'; - while (cur + 1 < size) { - unsigned char c = ff_text_r8(tr); - if (!c) -diff --git a/libavformat/subviewerdec.c b/libavformat/subviewerdec.c -index 0a2f0da3b1..5c2fe676f1 100644 ---- a/libavformat/subviewerdec.c -+++ b/libavformat/subviewerdec.c -@@ -51,32 +51,26 @@ static int subviewer_probe(const AVProbeData *p) - return 0; - } - --static int get_multiplier(int e) { -- switch (e) { -- case 1 : return 100; -- case 2 : return 10; -- case 3 : return 1; -- default : return -1; -- } --} - - static int read_ts(const char *s, int64_t *start, int *duration) - { - int64_t end; - int hh1, mm1, ss1, ms1; - int hh2, mm2, ss2, ms2; -- int multiplier1, multiplier2; -- int ms1p1, ms1p2, ms2p1, ms2p2; -- -- if (sscanf(s, "%u:%u:%u.%n%u%n,%u:%u:%u.%n%u%n", -- &hh1, &mm1, &ss1, &ms1p1, &ms1, &ms1p2, &hh2, &mm2, &ss2, &ms2p1, &ms2, &ms2p2) == 8) { -- multiplier1 = get_multiplier(ms1p2 - ms1p1); -- multiplier2 = get_multiplier(ms2p2 - ms2p1); -- if (multiplier1 <= 0 ||multiplier2 <= 0) -- return -1; -- -- end = (hh2*3600LL + mm2*60LL + ss2) * 1000LL + ms2 * multiplier2; -- *start = (hh1*3600LL + mm1*60LL + ss1) * 1000LL + ms1 * multiplier1; -+ int multiplier = 1; -+ -+ if (sscanf(s, "%u:%u:%u.%2u,%u:%u:%u.%2u", -+ 
&hh1, &mm1, &ss1, &ms1, &hh2, &mm2, &ss2, &ms2) == 8) { -+ multiplier = 10; -+ } else if (sscanf(s, "%u:%u:%u.%1u,%u:%u:%u.%1u", -+ &hh1, &mm1, &ss1, &ms1, &hh2, &mm2, &ss2, &ms2) == 8) { -+ multiplier = 100; -+ } -+ if (sscanf(s, "%u:%u:%u.%u,%u:%u:%u.%u", -+ &hh1, &mm1, &ss1, &ms1, &hh2, &mm2, &ss2, &ms2) == 8) { -+ ms1 = FFMIN(ms1, 999); -+ ms2 = FFMIN(ms2, 999); -+ end = (hh2*3600LL + mm2*60LL + ss2) * 1000LL + ms2 * multiplier; -+ *start = (hh1*3600LL + mm1*60LL + ss1) * 1000LL + ms1 * multiplier; - *duration = end - *start; - return 0; - } -diff --git a/libavformat/tee.c b/libavformat/tee.c -index 6fafc0a99d..c0b69a386c 100644 ---- a/libavformat/tee.c -+++ b/libavformat/tee.c -@@ -124,7 +124,6 @@ static int close_slave(TeeSlave *tee_slave) - unsigned i; - int ret = 0; ++ period = s->events[s->nb_events - 1].ts - s->events[0].ts; + period = (period + (DAY_TS - 1)) / DAY_TS * DAY_TS; + period = FFMAX(period, DAY_TS); -- av_dict_free(&tee_slave->fifo_options); - avf = tee_slave->avf; - if (!avf) - return 0; -@@ -230,7 +229,6 @@ static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave) - - av_dict_free(&options); - options = tee_slave->fifo_options; -- tee_slave->fifo_options = NULL; - } - ret = avformat_alloc_output_context2(&avf2, NULL, - tee_slave->use_fifo ? "fifo" :format, filename); -@@ -405,8 +403,6 @@ end: - av_free(format); - av_free(select); - av_free(on_fail); -- av_free(use_fifo); -- av_free(fifo_options_str); - av_dict_free(&options); - av_dict_free(&bsf_options); - av_freep(&tmp_select); -diff --git a/libavformat/tls_mbedtls.c b/libavformat/tls_mbedtls.c -index beb6e1cf08..aadf17760d 100644 ---- a/libavformat/tls_mbedtls.c -+++ b/libavformat/tls_mbedtls.c -@@ -19,7 +19,8 @@ - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - --#include -+#include -+#include - #include - #include - #include -@@ -129,15 +130,9 @@ static void handle_pk_parse_error(URLContext *h, int ret) - static void handle_handshake_error(URLContext *h, int ret) - { - switch (ret) { --#if MBEDTLS_VERSION_MAJOR < 3 - case MBEDTLS_ERR_SSL_NO_USABLE_CIPHERSUITE: - av_log(h, AV_LOG_ERROR, "None of the common ciphersuites is usable. 
Was the local certificate correctly set?\n"); - break; --#else -- case MBEDTLS_ERR_SSL_HANDSHAKE_FAILURE: -- av_log(h, AV_LOG_ERROR, "TLS handshake failed.\n"); -- break; --#endif - case MBEDTLS_ERR_SSL_FATAL_ALERT_MESSAGE: - av_log(h, AV_LOG_ERROR, "A fatal alert message was received from the peer, has the peer a correct certificate?\n"); - break; -@@ -200,30 +195,25 @@ static int tls_open(URLContext *h, const char *uri, int flags, AVDictionary **op +diff --git a/libavformat/sdp.c b/libavformat/sdp.c +index 334ee0f9df..95f3fbb876 100644 +--- a/libavformat/sdp.c ++++ b/libavformat/sdp.c +@@ -195,8 +195,6 @@ static char *extradata2psets(AVFormatContext *s, AVCodecParameters *par) + continue; } - } - -- // seed the random number generator -- if ((ret = mbedtls_ctr_drbg_seed(&tls_ctx->ctr_drbg_context, -- mbedtls_entropy_func, -- &tls_ctx->entropy_context, -- NULL, 0)) != 0) { -- av_log(h, AV_LOG_ERROR, "mbedtls_ctr_drbg_seed returned %d\n", ret); -- goto fail; -- } -- - // load key file - if (shr->key_file) { - if ((ret = mbedtls_pk_parse_keyfile(&tls_ctx->priv_key, - shr->key_file, -- tls_ctx->priv_key_pw --#if MBEDTLS_VERSION_MAJOR >= 3 -- , mbedtls_ctr_drbg_random, -- &tls_ctx->ctr_drbg_context --#endif -- )) != 0) { -+ tls_ctx->priv_key_pw)) != 0) { - handle_pk_parse_error(h, ret); - goto fail; + if (p != (psets + strlen(pset_string))) { +- if (p - psets >= MAX_PSET_SIZE) +- goto fail_in_loop; + *p = ','; + p++; } +@@ -206,7 +204,6 @@ static char *extradata2psets(AVFormatContext *s, AVCodecParameters *par) + } + if (!av_base64_encode(p, MAX_PSET_SIZE - (p - psets), r, r1 - r)) { + av_log(s, AV_LOG_ERROR, "Cannot Base64-encode %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"!\n", MAX_PSET_SIZE - (p - psets), r1 - r); +-fail_in_loop: + av_free(psets); + av_free(tmpbuf); + +diff --git a/libavformat/siff.c b/libavformat/siff.c +index af39bb3734..60a867df14 100644 +--- a/libavformat/siff.c ++++ b/libavformat/siff.c +@@ -199,10 +199,7 @@ static int siff_read_packet(AVFormatContext *s, AVPacket *pkt) + if (c->cur_frame >= c->frames) + return AVERROR_EOF; + if (c->curstrm == -1) { +- unsigned pktsize = avio_rl32(s->pb); +- if (pktsize < 4) +- return AVERROR_INVALIDDATA; +- c->pktsize = pktsize - 4; ++ c->pktsize = avio_rl32(s->pb) - 4; + c->flags = avio_rl16(s->pb); + if (c->flags & VB_HAS_AUDIO && !c->has_audio) + return AVERROR_INVALIDDATA; +diff --git a/libavformat/subfile.c b/libavformat/subfile.c +index e6712806a9..300672e657 100644 +--- a/libavformat/subfile.c ++++ b/libavformat/subfile.c +@@ -125,9 +125,9 @@ static int64_t subfile_seek(URLContext *h, int64_t pos, int whence) + return end; } -+ // seed the random number generator -+ if ((ret = mbedtls_ctr_drbg_seed(&tls_ctx->ctr_drbg_context, -+ mbedtls_entropy_func, -+ &tls_ctx->entropy_context, -+ NULL, 0)) != 0) { -+ av_log(h, AV_LOG_ERROR, "mbedtls_ctr_drbg_seed returned %d\n", ret); -+ goto fail; -+ } -+ - if ((ret = mbedtls_ssl_config_defaults(&tls_ctx->ssl_config, - shr->listen ? 
MBEDTLS_SSL_IS_SERVER : MBEDTLS_SSL_IS_CLIENT, - MBEDTLS_SSL_TRANSPORT_STREAM, -diff --git a/libavformat/udp.c b/libavformat/udp.c -index 1f8b85cfca..9b9d3de197 100644 ---- a/libavformat/udp.c -+++ b/libavformat/udp.c -@@ -740,10 +740,8 @@ static int udp_open(URLContext *h, const char *uri, int flags) - /* XXX: fix av_url_split */ - if (hostname[0] == '\0' || hostname[0] == '?') { - /* only accepts null hostname if input */ -- if (!(flags & AVIO_FLAG_READ)) { -- ret = AVERROR(EINVAL); -+ if (!(flags & AVIO_FLAG_READ)) - goto fail; -- } - } else { - if ((ret = ff_udp_set_remote_url(h, uri)) < 0) - goto fail; -@@ -756,10 +754,8 @@ static int udp_open(URLContext *h, const char *uri, int flags) - udp_fd = udp_socket_create(h, &my_addr, &len, localaddr); - else - udp_fd = udp_socket_create(h, &my_addr, &len, s->localaddr); -- if (udp_fd < 0) { -- ret = AVERROR(EIO); -+ if (udp_fd < 0) - goto fail; +- switch (whence) { +- case AVSEEK_SIZE: ++ if (whence == AVSEEK_SIZE) + return end - c->start; ++ switch (whence) { + case SEEK_SET: + new_pos = c->start + pos; + break; +@@ -137,8 +137,6 @@ static int64_t subfile_seek(URLContext *h, int64_t pos, int whence) + case SEEK_END: + new_pos = end + pos; + break; +- default: +- av_assert0(0); + } + if (new_pos < c->start) + return AVERROR(EINVAL); +diff --git a/libavformat/tls_schannel.c b/libavformat/tls_schannel.c +index bbeb8a81f2..d4959f75fa 100644 +--- a/libavformat/tls_schannel.c ++++ b/libavformat/tls_schannel.c +@@ -388,7 +388,7 @@ static int tls_read(URLContext *h, uint8_t *buf, int len) + SECURITY_STATUS sspi_ret = SEC_E_OK; + SecBuffer inbuf[4]; + SecBufferDesc inbuf_desc; +- int size, ret = 0; ++ int size, ret; + int min_enc_buf_size = len + SCHANNEL_FREE_BUFFER_SIZE; + + /* If we have some left-over data from previous network activity, +diff --git a/libavformat/tmv.c b/libavformat/tmv.c +index 57f2e09df6..b74af547c5 100644 +--- a/libavformat/tmv.c ++++ b/libavformat/tmv.c +@@ -103,10 +103,6 @@ static int tmv_read_header(AVFormatContext *s) + char_cols = avio_r8(pb); + char_rows = avio_r8(pb); + tmv->video_chunk_size = char_cols * char_rows * 2; +- if (!tmv->video_chunk_size) { +- av_log(s, AV_LOG_ERROR, "invalid video chunk size\n"); +- return AVERROR_INVALIDDATA; - } - s->local_addr_storage=my_addr; //store for future multicast join + features = avio_r8(pb); + if (features & ~(TMV_PADDING | TMV_STEREO)) { +diff --git a/libavformat/tta.c b/libavformat/tta.c +index b34630d1d7..6aa72b5d1d 100644 +--- a/libavformat/tta.c ++++ b/libavformat/tta.c +@@ -91,7 +91,7 @@ static int tta_read_header(AVFormatContext *s) + c->totalframes = nb_samples / c->frame_size + (c->last_frame_size < c->frame_size); + c->currentframe = 0; +- if(c->totalframes >= (INT_MAX - 4)/sizeof(uint32_t) || c->totalframes <= 0){ ++ if(c->totalframes >= UINT_MAX/sizeof(uint32_t) || c->totalframes <= 0){ + av_log(s, AV_LOG_ERROR, "totalframes %d invalid\n", c->totalframes); + return AVERROR_INVALIDDATA; + } +diff --git a/libavformat/tty.c b/libavformat/tty.c +index b47f874c60..aed5c888c3 100644 +--- a/libavformat/tty.c ++++ b/libavformat/tty.c +@@ -122,16 +122,13 @@ static int read_header(AVFormatContext *avctx) + s->chars_per_frame = FFMAX(av_q2d(st->time_base)*s->chars_per_frame, 1); + + if (avctx->pb->seekable & AVIO_SEEKABLE_NORMAL) { +- int64_t fsize = avio_size(avctx->pb); +- if (fsize > 0) { +- s->fsize = fsize; +- st->duration = (s->fsize + s->chars_per_frame - 1) / s->chars_per_frame; ++ s->fsize = avio_size(avctx->pb); ++ st->duration = (s->fsize + s->chars_per_frame - 1) 
/ s->chars_per_frame; + +- if (ff_sauce_read(avctx, &s->fsize, 0, 0) < 0) +- efi_read(avctx, s->fsize - 51); ++ if (ff_sauce_read(avctx, &s->fsize, 0, 0) < 0) ++ efi_read(avctx, s->fsize - 51); + +- avio_seek(avctx->pb, 0, SEEK_SET); +- } ++ avio_seek(avctx->pb, 0, SEEK_SET); + } + + fail: +diff --git a/libavformat/ty.c b/libavformat/ty.c +index c5ea4bf98d..c8e1067c0e 100644 +--- a/libavformat/ty.c ++++ b/libavformat/ty.c +@@ -47,7 +47,7 @@ static const uint8_t ty_AC3AudioPacket[] = { 0x00, 0x00, 0x01, 0xbd }; + #define CHUNK_PEEK_COUNT 3 /* number of chunks to probe */ + + typedef struct TyRecHdr { +- int32_t rec_size; ++ int64_t rec_size; + uint8_t ex[2]; + uint8_t rec_type; + uint8_t subrec_type; diff --git a/libavformat/utils.c b/libavformat/utils.c -index b2d011a0db..e10b493dae 100644 +index d79a13ca7a..9bc842aa75 100644 --- a/libavformat/utils.c +++ b/libavformat/utils.c +@@ -2146,7 +2146,7 @@ int ff_seek_frame_binary(AVFormatContext *s, int stream_index, + int64_t target_ts, int flags) + { + const AVInputFormat *avif = s->iformat; +- int64_t pos_min = 0, pos_max = 0, pos, pos_limit; ++ int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit; + int64_t ts_min, ts_max, ts; + int index; + int64_t ret; @@ -3013,6 +3013,40 @@ static int has_codec_parameters(AVStream *st, const char **errmsg_ptr) return 1; } @@ -70988,118 +71178,96 @@ index b2d011a0db..e10b493dae 100644 } if (!options) av_dict_free(&thread_opt); -@@ -4997,7 +5054,7 @@ void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf, - key_len = ptr - key; - - callback_get_buf(context, key, key_len, &dest, &dest_len); -- dest_end = dest ? dest + dest_len - 1 : NULL; -+ dest_end = dest + dest_len - 1; - - if (*ptr == '\"') { - ptr++; -diff --git a/libavformat/vividas.c b/libavformat/vividas.c -index e253b376ab..d35a646bde 100644 ---- a/libavformat/vividas.c -+++ b/libavformat/vividas.c -@@ -683,7 +683,6 @@ static int viv_read_packet(AVFormatContext *s, - - if (viv->sb_entries[viv->current_sb_entry].flag == 0) { - uint64_t v_size = ffio_read_varlen(pb); -- int last = 0, last_start; - - if (!viv->num_audio) - return AVERROR_INVALIDDATA; -@@ -707,18 +706,12 @@ static int viv_read_packet(AVFormatContext *s, - - if (i > 0 && start == 0) - break; -- if (start < last) -- return AVERROR_INVALIDDATA; - - viv->n_audio_subpackets = i + 1; -- last = - viv->audio_subpackets[i].start = start; - viv->audio_subpackets[i].pcm_bytes = pcm_bytes; - } -- last_start = - viv->audio_subpackets[viv->n_audio_subpackets].start = (int)(off - avio_tell(pb)); -- if (last_start < last) -- return AVERROR_INVALIDDATA; - viv->current_audio_subpacket = 0; - - } else { -diff --git a/libavformat/vivo.c b/libavformat/vivo.c -index 78d1377e6b..fb58aa6178 100644 ---- a/libavformat/vivo.c -+++ b/libavformat/vivo.c -@@ -26,7 +26,6 @@ - * @sa http://wiki.multimedia.cx/index.php?title=Vivo - */ - --#include "libavutil/avstring.h" - #include "libavutil/parseutils.h" - #include "avformat.h" - #include "internal.h" -@@ -121,7 +120,7 @@ static int vivo_get_packet_header(AVFormatContext *s) - static int vivo_read_header(AVFormatContext *s) - { - VivoContext *vivo = s->priv_data; -- AVRational fps = { 0 }; -+ AVRational fps = { 1, 25}; - AVStream *ast, *vst; - unsigned char *line, *line_end, *key, *value; - long value_int; -@@ -207,21 +206,17 @@ static int vivo_read_header(AVFormatContext *s) - return AVERROR_INVALIDDATA; - value_used = 1; - } else if (!strcmp(key, "FPS")) { -- double d; -- if (av_sscanf(value, "%f", &d) != 1) -- return 
AVERROR_INVALIDDATA; -+ AVRational tmp; - - value_used = 1; -- if (!fps.num && !fps.den) -- fps = av_inv_q(av_d2q(d, 10000)); -+ if (!av_parse_ratio(&tmp, value, 10000, AV_LOG_WARNING, s)) -+ fps = av_inv_q(tmp); +diff --git a/libavformat/wavdec.c b/libavformat/wavdec.c +index 0fb81e7ed7..b11c6091bc 100644 +--- a/libavformat/wavdec.c ++++ b/libavformat/wavdec.c +@@ -442,7 +442,7 @@ static int wav_read_header(AVFormatContext *s) } - if (!value_used) - av_dict_set(&s->metadata, key, value, 0); + if (rf64 || bw64) { +- next_tag_ofs = wav->data_end = av_sat_add64(avio_tell(pb), data_size); ++ next_tag_ofs = wav->data_end = avio_tell(pb) + data_size; + } else if (size != 0xFFFFFFFF) { + data_size = size; + next_tag_ofs = wav->data_end = size ? next_tag_ofs : INT64_MAX; +@@ -726,10 +726,6 @@ smv_retry: + goto smv_out; + } + size = avio_rl24(s->pb); +- if (size > wav->smv_block_size) { +- ret = AVERROR_EOF; +- goto smv_out; +- } + ret = av_get_packet(s->pb, pkt, size); + if (ret < 0) + goto smv_out; +@@ -765,8 +761,6 @@ smv_out: + goto smv_retry; + return AVERROR_EOF; } +- if (INT64_MAX - left < avio_tell(s->pb)) +- return AVERROR_INVALIDDATA; + wav->data_end = avio_tell(s->pb) + left; } -- if (!fps.num || !fps.den) -- fps = (AVRational){ 1, 25 }; - avpriv_set_pts_info(ast, 64, 1, ast->codecpar->sample_rate); - avpriv_set_pts_info(vst, 64, fps.num, fps.den); -diff --git a/libavformat/webmdashenc.c b/libavformat/webmdashenc.c -index 181ae9db69..1304c1a8c3 100644 ---- a/libavformat/webmdashenc.c -+++ b/libavformat/webmdashenc.c -@@ -93,7 +93,7 @@ static int write_header(AVFormatContext *s) - } - avio_printf(pb, " minBufferTime=\"PT%gS\"\n", min_buffer_time); - avio_printf(pb, " profiles=\"%s\"%s", -- w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:mpeg:dash:profile:webm-on-demand:2012", -+ w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:webm:dash:profile:webm-on-demand:2012", - w->is_live ? 
"\n" : ">\n"); - if (w->is_live) { - time_t local_time = time(NULL); +diff --git a/libavformat/westwood_vqa.c b/libavformat/westwood_vqa.c +index c83b84c4ba..a0db854b1c 100644 +--- a/libavformat/westwood_vqa.c ++++ b/libavformat/westwood_vqa.c +@@ -163,15 +163,13 @@ static int wsvqa_read_packet(AVFormatContext *s, + int ret = -1; + uint8_t preamble[VQA_PREAMBLE_SIZE]; + uint32_t chunk_type; +- int chunk_size; +- unsigned skip_byte; ++ uint32_t chunk_size; ++ int skip_byte; + + while (avio_read(pb, preamble, VQA_PREAMBLE_SIZE) == VQA_PREAMBLE_SIZE) { + chunk_type = AV_RB32(&preamble[0]); + chunk_size = AV_RB32(&preamble[4]); + +- if (chunk_size < 0) +- return AVERROR_INVALIDDATA; + skip_byte = chunk_size & 0x01; + + if ((chunk_type == SND0_TAG) || (chunk_type == SND1_TAG) || +@@ -232,7 +230,7 @@ static int wsvqa_read_packet(AVFormatContext *s, + break; + case SND2_TAG: + /* 2 samples/byte, 1 or 2 samples per frame depending on stereo */ +- pkt->duration = (chunk_size * 2LL) / wsvqa->channels; ++ pkt->duration = (chunk_size * 2) / wsvqa->channels; + break; + } + break; +diff --git a/libavformat/xmv.c b/libavformat/xmv.c +index 6f94d20514..0c69d267de 100644 +--- a/libavformat/xmv.c ++++ b/libavformat/xmv.c +@@ -223,8 +223,6 @@ static int xmv_read_header(AVFormatContext *s) + /* Initialize the packet context */ + + xmv->next_packet_offset = avio_tell(pb); +- if (this_packet_size < xmv->next_packet_offset) +- return AVERROR_INVALIDDATA; + xmv->next_packet_size = this_packet_size - xmv->next_packet_offset; + xmv->stream_count = xmv->audio_track_count + 1; + diff --git a/libavformat/xwma.c b/libavformat/xwma.c -index 6997d5420b..aedadcf140 100644 +index 61372b08cb..6997d5420b 100644 --- a/libavformat/xwma.c +++ b/libavformat/xwma.c -@@ -278,7 +278,7 @@ static int xwma_read_header(AVFormatContext *s) - * the total duration using the average bits per sample and the - * total data length. - */ -- st->duration = av_rescale((size<<3), st->codecpar->sample_rate, st->codecpar->bit_rate); -+ st->duration = (size<<3) * st->codecpar->sample_rate / st->codecpar->bit_rate; +@@ -151,7 +151,7 @@ static int xwma_read_header(AVFormatContext *s) + st->codecpar->channels); + return AVERROR_INVALIDDATA; } - - fail: +- if (!st->codecpar->bits_per_coded_sample || st->codecpar->bits_per_coded_sample > 64) { ++ if (!st->codecpar->bits_per_coded_sample) { + av_log(s, AV_LOG_WARNING, "Invalid bits_per_coded_sample: %d\n", + st->codecpar->bits_per_coded_sample); + return AVERROR_INVALIDDATA; diff --git a/libavutil/Makefile b/libavutil/Makefile index 27bafe9e12..c9075ddf8a 100644 --- a/libavutil/Makefile @@ -73048,8 +73216,55 @@ index 0000000000..d457c10870 + +#endif // AVUTIL_ARM_SAND_NEON_H + +diff --git a/libavutil/base64.c b/libavutil/base64.c +index 80919d8bee..a1316b9438 100644 +--- a/libavutil/base64.c ++++ b/libavutil/base64.c +@@ -125,12 +125,10 @@ validity_check: + } + + out3: +- if (end - dst) +- *dst++ = v >> 10; ++ *dst++ = v >> 10; + v <<= 2; + out2: +- if (end - dst) +- *dst++ = v >> 4; ++ *dst++ = v >> 4; + out1: + out0: + return bits & 1 ? AVERROR_INVALIDDATA : out ? 
dst - out : 0; +diff --git a/libavutil/buffer.c b/libavutil/buffer.c +index 1785a284a2..858633e8c7 100644 +--- a/libavutil/buffer.c ++++ b/libavutil/buffer.c +@@ -250,10 +250,7 @@ AVBufferPool *av_buffer_pool_init2(buffer_size_t size, void *opaque, + if (!pool) + return NULL; + +- if (ff_mutex_init(&pool->mutex, NULL)) { +- av_free(pool); +- return NULL; +- } ++ ff_mutex_init(&pool->mutex, NULL); + + pool->size = size; + pool->opaque = opaque; +@@ -272,10 +269,7 @@ AVBufferPool *av_buffer_pool_init(buffer_size_t size, AVBufferRef* (*alloc)(buff + if (!pool) + return NULL; + +- if (ff_mutex_init(&pool->mutex, NULL)) { +- av_free(pool); +- return NULL; +- } ++ ff_mutex_init(&pool->mutex, NULL); + + pool->size = size; + pool->alloc = alloc ? alloc : av_buffer_alloc; diff --git a/libavutil/frame.c b/libavutil/frame.c -index 75e347bf2f..daa6477485 100644 +index 5418e28f3b..daa6477485 100644 --- a/libavutil/frame.c +++ b/libavutil/frame.c @@ -16,6 +16,8 @@ @@ -73084,6 +73299,15 @@ index 75e347bf2f..daa6477485 100644 desc = av_pix_fmt_desc_get(frame->format); if (!desc) return AVERROR_BUG; +@@ -937,7 +948,7 @@ int av_frame_apply_cropping(AVFrame *frame, int flags) + if (log2_crop_align < min_log2_align) + return AVERROR_BUG; + +- if (min_log2_align < 5 && log2_crop_align != INT_MAX) { ++ if (min_log2_align < 5) { + frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1); + calc_cropping_offsets(offsets, frame, desc); + } diff --git a/libavutil/frame.h b/libavutil/frame.h index 7d1f8e2935..a4e7dc915d 100644 --- a/libavutil/frame.h @@ -73105,6 +73329,19 @@ index 7d1f8e2935..a4e7dc915d 100644 /** * @} */ +diff --git a/libavutil/hwcontext_d3d11va.c b/libavutil/hwcontext_d3d11va.c +index 3d4dfe1943..2a3549ebd8 100644 +--- a/libavutil/hwcontext_d3d11va.c ++++ b/libavutil/hwcontext_d3d11va.c +@@ -164,7 +164,7 @@ static AVBufferRef *wrap_texture_buf(ID3D11Texture2D *tex, int index) + desc->texture = tex; + desc->index = index; + +- buf = av_buffer_create((uint8_t *)desc, sizeof(*desc), free_texture, tex, 0); ++ buf = av_buffer_create((uint8_t *)desc, sizeof(desc), free_texture, tex, 0); + if (!buf) { + ID3D11Texture2D_Release(tex); + av_free(desc); diff --git a/libavutil/hwcontext_drm.c b/libavutil/hwcontext_drm.c index 7a9fdbd263..2f825b7e16 100644 --- a/libavutil/hwcontext_drm.c @@ -73296,6 +73533,32 @@ index 7a9fdbd263..2f825b7e16 100644 map = av_frame_alloc(); if (!map) +diff --git a/libavutil/hwcontext_dxva2.c b/libavutil/hwcontext_dxva2.c +index 88961d05e8..33b3dc80ff 100644 +--- a/libavutil/hwcontext_dxva2.c ++++ b/libavutil/hwcontext_dxva2.c +@@ -133,7 +133,7 @@ static AVBufferRef *dxva2_pool_alloc(void *opaque, buffer_size_t size) + if (s->nb_surfaces_used < hwctx->nb_surfaces) { + s->nb_surfaces_used++; + return av_buffer_create((uint8_t*)s->surfaces_internal[s->nb_surfaces_used - 1], +- sizeof(**hwctx->surfaces), dxva2_pool_release_dummy, 0, 0); ++ sizeof(*hwctx->surfaces), dxva2_pool_release_dummy, 0, 0); + } + + return NULL; +diff --git a/libavutil/imgutils.c b/libavutil/imgutils.c +index 1cdffd4dfe..bd1333170a 100644 +--- a/libavutil/imgutils.c ++++ b/libavutil/imgutils.c +@@ -297,7 +297,7 @@ int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enu + stride = 8LL*w; + stride += 128*8; + +- if (w==0 || h==0 || w > INT32_MAX || h > INT32_MAX || stride >= INT_MAX || stride*(h + 128ULL) >= INT_MAX) { ++ if ((int)w<=0 || (int)h<=0 || stride >= INT_MAX || stride*(uint64_t)(h+128) >= INT_MAX) { + av_log(&imgutils, AV_LOG_ERROR, "Picture size %ux%u is 
invalid\n", w, h); + return AVERROR(EINVAL); + } diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c index 18c7a0efc8..bab13a4d50 100644 --- a/libavutil/pixdesc.c @@ -73370,6 +73633,64 @@ index 46ef211add..9195ead15f 100644 AV_PIX_FMT_X2RGB10LE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), little-endian, X=unused/undefined AV_PIX_FMT_X2RGB10BE, ///< packed RGB 10:10:10, 30bpp, (msb)2X 10R 10G 10B(lsb), big-endian, X=unused/undefined +diff --git a/libavutil/ppc/cpu.c b/libavutil/ppc/cpu.c +index 2b13cda662..b022149fa0 100644 +--- a/libavutil/ppc/cpu.c ++++ b/libavutil/ppc/cpu.c +@@ -27,8 +27,8 @@ + #if HAVE_UNISTD_H + #include + #endif +-#elif defined(__NetBSD__) || defined(__OpenBSD__) +-#include ++#elif defined(__OpenBSD__) ++#include + #include + #include + #elif defined(__AMIGAOS4__) +@@ -56,8 +56,8 @@ int ff_get_cpu_flags_ppc(void) + if (result == VECTORTYPE_ALTIVEC) + return AV_CPU_FLAG_ALTIVEC; + return 0; +-#elif defined(__APPLE__) || defined(__NetBSD__) || defined(__OpenBSD__) +-#if defined(__NetBSD__) || defined(__OpenBSD__) ++#elif defined(__APPLE__) || defined(__OpenBSD__) ++#ifdef __OpenBSD__ + int sels[2] = {CTL_MACHDEP, CPU_ALTIVEC}; + #else + int sels[2] = {CTL_HW, HW_VECTORUNIT}; +@@ -95,15 +95,12 @@ int ff_get_cpu_flags_ppc(void) + #endif + if (ret & AV_CPU_FLAG_VSX) + av_assert0(ret & AV_CPU_FLAG_ALTIVEC); +- } +-#ifdef AT_HWCAP2 /* not introduced until glibc 2.18 */ +- else if (buf[i] == AT_HWCAP2) { ++ } else if (buf[i] == AT_HWCAP2) { + #ifdef PPC_FEATURE2_ARCH_2_07 + if (buf[i + 1] & PPC_FEATURE2_ARCH_2_07) + ret |= AV_CPU_FLAG_POWER8; + #endif + } +-#endif /* AT_HWCAP2 */ + } + } + +diff --git a/libavutil/rational.h b/libavutil/rational.h +index 790f089527..cbb08a0baf 100644 +--- a/libavutil/rational.h ++++ b/libavutil/rational.h +@@ -168,10 +168,6 @@ static av_always_inline AVRational av_inv_q(AVRational q) + * In case of infinity, the returned value is expressed as `{1, 0}` or + * `{-1, 0}` depending on the sign. + * +- * In general rational numbers with |num| <= 1<<26 && |den| <= 1<<26 +- * can be recovered exactly from their double representation. 
+- * (no exceptions were found within 1B random ones) +- * + * @param d `double` to convert + * @param max Maximum allowed numerator and denominator + * @return `d` in AVRational form diff --git a/libavutil/rpi_sand_fn_pw.h b/libavutil/rpi_sand_fn_pw.h new file mode 100644 index 0000000000..0d5d203dc3 @@ -74248,533 +74569,178 @@ index 0000000000..462ccb8abd + +#endif + -diff --git a/libavutil/utils.c b/libavutil/utils.c -index ea9b5097b8..c1cd452eee 100644 ---- a/libavutil/utils.c -+++ b/libavutil/utils.c -@@ -37,6 +37,10 @@ const char *av_version_info(void) - - unsigned avutil_version(void) +diff --git a/libavutil/slicethread.c b/libavutil/slicethread.c +index 45d085001c..dfbe551ef2 100644 +--- a/libavutil/slicethread.c ++++ b/libavutil/slicethread.c +@@ -98,7 +98,6 @@ int avpriv_slicethread_create(AVSliceThread **pctx, void *priv, { -+ static int checks_done; -+ if (checks_done) -+ return LIBAVUTIL_VERSION_INT; -+ - av_assert0(AV_SAMPLE_FMT_DBLP == 9); - av_assert0(AVMEDIA_TYPE_ATTACHMENT == 4); - av_assert0(AV_PICTURE_TYPE_BI == 7); -@@ -54,6 +58,7 @@ unsigned avutil_version(void) - av_log(NULL, AV_LOG_ERROR, "Libavutil has been linked to a broken llrint()\n"); - } + AVSliceThread *ctx; + int nb_workers, i; +- int ret; -+ checks_done = 1; - return LIBAVUTIL_VERSION_INT; + av_assert0(nb_threads >= 0); + if (!nb_threads) { +@@ -132,37 +131,16 @@ int avpriv_slicethread_create(AVSliceThread **pctx, void *priv, + + atomic_init(&ctx->first_job, 0); + atomic_init(&ctx->current_job, 0); +- ret = pthread_mutex_init(&ctx->done_mutex, NULL); +- if (ret) { +- av_freep(&ctx->workers); +- av_freep(pctx); +- return AVERROR(ret); +- } +- ret = pthread_cond_init(&ctx->done_cond, NULL); +- if (ret) { +- ctx->nb_threads = main_func ? 0 : 1; +- avpriv_slicethread_free(pctx); +- return AVERROR(ret); +- } ++ pthread_mutex_init(&ctx->done_mutex, NULL); ++ pthread_cond_init(&ctx->done_cond, NULL); + ctx->done = 0; + + for (i = 0; i < nb_workers; i++) { + WorkerContext *w = &ctx->workers[i]; + int ret; + w->ctx = ctx; +- ret = pthread_mutex_init(&w->mutex, NULL); +- if (ret) { +- ctx->nb_threads = main_func ? i : i + 1; +- avpriv_slicethread_free(pctx); +- return AVERROR(ret); +- } +- ret = pthread_cond_init(&w->cond, NULL); +- if (ret) { +- pthread_mutex_destroy(&w->mutex); +- ctx->nb_threads = main_func ? i : i + 1; +- avpriv_slicethread_free(pctx); +- return AVERROR(ret); +- } ++ pthread_mutex_init(&w->mutex, NULL); ++ pthread_cond_init(&w->cond, NULL); + pthread_mutex_lock(&w->mutex); + w->done = 0; + +diff --git a/libavutil/softfloat.h b/libavutil/softfloat.h +index 399ca6d682..a651406f74 100644 +--- a/libavutil/softfloat.h ++++ b/libavutil/softfloat.h +@@ -236,10 +236,6 @@ static av_always_inline SoftFloat av_sqrt_sf(SoftFloat val) + + /** + * Rounding-to-nearest used. 
+- * +- * @param a angle in units of (1ULL<<30)/M_PI radians +- * @param s pointer to where sine in units of (1<<30) is returned +- * @param c pointer to where cosine in units of (1<<30) is returned + */ + static av_unused void av_sincos_sf(int a, int *s, int *c) + { +@@ -285,7 +281,7 @@ static av_unused void av_sincos_sf(int a, int *s, int *c) + (int64_t)av_sintbl_4_sf[(idx & 0x1f) + 1] * (a & 0x7ff) + + 0x400) >> 11); + +- *c = (int)(((int64_t)cv * ct - (int64_t)sv * st + 0x20000000) >> 30); ++ *c = (int)(((int64_t)cv * ct + (int64_t)sv * st + 0x20000000) >> 30); + + *s = (int)(((int64_t)cv * st + (int64_t)sv * ct + 0x20000000) >> 30); + } +diff --git a/libavutil/tests/base64.c b/libavutil/tests/base64.c +index 66d0fdc1fc..400e01cefe 100644 +--- a/libavutil/tests/base64.c ++++ b/libavutil/tests/base64.c +@@ -64,16 +64,6 @@ static int test_encode_decode(const uint8_t *data, unsigned int data_size, + printf("Failed: decode to NULL buffer\n"); + return 1; + } +- if (data_size > 0 && (data2_size = av_base64_decode(data2, encoded, data_size - 1)) != data_size - 1) { +- printf("Failed: out of array write\n" +- "Encoded:\n%s\n", encoded); +- return 1; +- } +- if (data_size > 1 && (data2_size = av_base64_decode(data2, encoded, data_size - 2)) != data_size - 2) { +- printf("Failed: out of array write\n" +- "Encoded:\n%s\n", encoded); +- return 1; +- } + if (strlen(encoded)) { + char *end = strchr(encoded, '='); + if (!end) +diff --git a/libavutil/tests/dict.c b/libavutil/tests/dict.c +index 4c526f72f4..56e98557a7 100644 +--- a/libavutil/tests/dict.c ++++ b/libavutil/tests/dict.c +@@ -122,15 +122,12 @@ int main(void) + + //valgrind sensible test + printf("\nTesting av_dict_set() with existing AVDictionaryEntry.key as key\n"); +- if (av_dict_set(&dict, "key", "old", 0) < 0) +- return 1; ++ av_dict_set(&dict, "key", "old", 0); + e = av_dict_get(dict, "key", NULL, 0); +- if (av_dict_set(&dict, e->key, "new val OK", 0) < 0) +- return 1; ++ av_dict_set(&dict, e->key, "new val OK", 0); + e = av_dict_get(dict, "key", NULL, 0); + printf("%s\n", e->value); +- if (av_dict_set(&dict, e->key, e->value, 0) < 0) +- return 1; ++ av_dict_set(&dict, e->key, e->value, 0); + e = av_dict_get(dict, "key", NULL, 0); + printf("%s\n", e->value); + av_dict_free(&dict); +diff --git a/libavutil/tests/opt.c b/libavutil/tests/opt.c +index cdbe66e336..3134ffd354 100644 +--- a/libavutil/tests/opt.c ++++ b/libavutil/tests/opt.c +@@ -216,7 +216,6 @@ int main(void) + { + TestContext test_ctx = { 0 }; + char *buf; +- int ret; + test_ctx.class = &test_class; + + av_log_set_level(AV_LOG_QUIET); +@@ -227,10 +226,8 @@ int main(void) + av_opt_free(&test_ctx); + memset(&test_ctx, 0, sizeof(test_ctx)); + test_ctx.class = &test_class; +- ret = av_set_options_string(&test_ctx, buf, "=", ","); ++ av_set_options_string(&test_ctx, buf, "=", ","); + av_free(buf); +- if (ret < 0) +- printf("Error ret '%d'\n", ret); + if (av_opt_serialize(&test_ctx, 0, 0, &buf, '=', ',') >= 0) { + printf("%s\n", buf); + av_free(buf); +diff --git a/libavutil/tests/softfloat.c b/libavutil/tests/softfloat.c +index a2e628fe81..c06de44933 100644 +--- a/libavutil/tests/softfloat.c ++++ b/libavutil/tests/softfloat.c +@@ -148,7 +148,7 @@ int main(void){ + av_sincos_sf(i*(1ULL<<32)/36/4, &s, &c); + errs = (double)s/ (1<<30) - sin(i*M_PI/36); + errc = (double)c/ (1<<30) - cos(i*M_PI/36); +- if (fabs(errs) > 0.000000004 || fabs(errc) >0.000000004) { ++ if (fabs(errs) > 0.00000002 || fabs(errc) >0.001) { + printf("sincos FAIL %d %f %f %f %f\n", i, (float)s/ (1<<30), 
(float)c/ (1<<30), sin(i*M_PI/36), cos(i*M_PI/36)); + } + +diff --git a/libavutil/timecode.c b/libavutil/timecode.c +index af21d88e11..2fc3295e25 100644 +--- a/libavutil/timecode.c ++++ b/libavutil/timecode.c +@@ -208,7 +208,7 @@ static int fps_from_frame_rate(AVRational rate) + { + if (!rate.den || !rate.num) + return -1; +- return (rate.num + rate.den/2LL) / rate.den; ++ return (rate.num + rate.den/2) / rate.den; } -diff --git a/libswscale/aarch64/rgb2rgb.c b/libswscale/aarch64/rgb2rgb.c -index a9bf6ff9e0..6a0e2dcc09 100644 ---- a/libswscale/aarch64/rgb2rgb.c -+++ b/libswscale/aarch64/rgb2rgb.c -@@ -30,6 +30,12 @@ - void ff_interleave_bytes_neon(const uint8_t *src1, const uint8_t *src2, - uint8_t *dest, int width, int height, - int src1Stride, int src2Stride, int dstStride); -+void ff_bgr24toyv12_aarch64(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv); -+void ff_rgb24toyv12_aarch64(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv); - - av_cold void rgb2rgb_init_aarch64(void) - { -@@ -37,5 +43,7 @@ av_cold void rgb2rgb_init_aarch64(void) - - if (have_neon(cpu_flags)) { - interleaveBytes = ff_interleave_bytes_neon; -+ ff_rgb24toyv12 = ff_rgb24toyv12_aarch64; -+ ff_bgr24toyv12 = ff_bgr24toyv12_aarch64; - } - } -diff --git a/libswscale/aarch64/rgb2rgb_neon.S b/libswscale/aarch64/rgb2rgb_neon.S -index d81110ec57..978ab443ea 100644 ---- a/libswscale/aarch64/rgb2rgb_neon.S -+++ b/libswscale/aarch64/rgb2rgb_neon.S -@@ -77,3 +77,448 @@ function ff_interleave_bytes_neon, export=1 - 0: - ret - endfunc -+ -+// void ff_rgb24toyv12_aarch64( -+// const uint8_t *src, // x0 -+// uint8_t *ydst, // x1 -+// uint8_t *udst, // x2 -+// uint8_t *vdst, // x3 -+// int width, // w4 -+// int height, // w5 -+// int lumStride, // w6 -+// int chromStride, // w7 -+// int srcStr, // [sp, #0] -+// int32_t *rgb2yuv); // [sp, #8] -+ -+function ff_rgb24toyv12_aarch64, export=1 -+ ldr x15, [sp, #8] -+ ld1 {v3.s}[2], [x15], #4 -+ ld1 {v3.s}[1], [x15], #4 -+ ld1 {v3.s}[0], [x15], #4 -+ ld1 {v4.s}[2], [x15], #4 -+ ld1 {v4.s}[1], [x15], #4 -+ ld1 {v4.s}[0], [x15], #4 -+ ld1 {v5.s}[2], [x15], #4 -+ ld1 {v5.s}[1], [x15], #4 -+ ld1 {v5.s}[0], [x15] -+ b 99f -+endfunc -+ -+// void ff_bgr24toyv12_aarch64( -+// const uint8_t *src, // x0 -+// uint8_t *ydst, // x1 -+// uint8_t *udst, // x2 -+// uint8_t *vdst, // x3 -+// int width, // w4 -+// int height, // w5 -+// int lumStride, // w6 -+// int chromStride, // w7 -+// int srcStr, // [sp, #0] -+// int32_t *rgb2yuv); // [sp, #8] -+ -+// regs -+// v0-2 Src bytes - reused as chroma src -+// v3-5 Coeffs (packed very inefficiently - could be squashed) -+// v6 128b -+// v7 128h -+// v8-15 Reserved -+// v16-18 Lo Src expanded as H -+// v19 - -+// v20-22 Hi Src expanded as H -+// v23 - -+// v24 U out -+// v25 U tmp -+// v26 Y out -+// v27-29 Y tmp -+// v30 V out -+// v31 V tmp -+ -+// Assumes Little Endian in tail stores & conversion matrix -+ -+function ff_bgr24toyv12_aarch64, export=1 -+ ldr x15, [sp, #8] -+ ld3 {v3.s, v4.s, v5.s}[0], [x15], #12 -+ ld3 {v3.s, v4.s, v5.s}[1], [x15], #12 -+ ld3 {v3.s, v4.s, v5.s}[2], [x15] -+99: -+ ldr w14, [sp, #0] -+ movi v7.8b, #128 -+ uxtl v6.8h, v7.8b -+ // Ensure if nothing to do then we do nothing -+ cmp w4, #0 -+ b.le 90f -+ cmp w5, #0 -+ b.le 90f -+ // If w % 16 != 0 then -16 so we do main loop 1 fewer times with -+ // the remainder done 
in the tail -+ tst w4, #15 -+ b.eq 1f -+ sub w4, w4, #16 -+1: -+ -+// -------------------- Even line body - YUV -+11: -+ subs w9, w4, #0 -+ mov x10, x0 -+ mov x11, x1 -+ mov x12, x2 -+ mov x13, x3 -+ b.lt 12f -+ -+ ld3 {v0.16b, v1.16b, v2.16b}, [x10], #48 -+ subs w9, w9, #16 -+ b.le 13f -+ -+10: -+ uxtl v16.8h, v0.8b -+ uxtl v17.8h, v1.8b -+ uxtl v18.8h, v2.8b -+ -+ uxtl2 v20.8h, v0.16b -+ uxtl2 v21.8h, v1.16b -+ uxtl2 v22.8h, v2.16b -+ -+ bic v0.8h, #0xff, LSL #8 -+ bic v1.8h, #0xff, LSL #8 -+ bic v2.8h, #0xff, LSL #8 -+ -+ // Testing shows it is faster to stack the smull/smlal ops together -+ // rather than interleave them between channels and indeed even the -+ // shift/add sections seem happier not interleaved -+ -+ // Y0 -+ smull v26.4s, v16.4h, v3.h[0] -+ smlal v26.4s, v17.4h, v4.h[0] -+ smlal v26.4s, v18.4h, v5.h[0] -+ smull2 v27.4s, v16.8h, v3.h[0] -+ smlal2 v27.4s, v17.8h, v4.h[0] -+ smlal2 v27.4s, v18.8h, v5.h[0] -+ // Y1 -+ smull v28.4s, v20.4h, v3.h[0] -+ smlal v28.4s, v21.4h, v4.h[0] -+ smlal v28.4s, v22.4h, v5.h[0] -+ smull2 v29.4s, v20.8h, v3.h[0] -+ smlal2 v29.4s, v21.8h, v4.h[0] -+ smlal2 v29.4s, v22.8h, v5.h[0] -+ shrn v26.4h, v26.4s, #12 -+ shrn2 v26.8h, v27.4s, #12 -+ add v26.8h, v26.8h, v6.8h // +128 (>> 3 = 16) -+ uqrshrn v26.8b, v26.8h, #3 -+ shrn v28.4h, v28.4s, #12 -+ shrn2 v28.8h, v29.4s, #12 -+ add v28.8h, v28.8h, v6.8h -+ uqrshrn2 v26.16b, v28.8h, #3 -+ // Y0/Y1 -+ -+ // U -+ // Vector subscript *2 as we loaded into S but are only using H -+ smull v24.4s, v0.4h, v3.h[2] -+ smlal v24.4s, v1.4h, v4.h[2] -+ smlal v24.4s, v2.4h, v5.h[2] -+ smull2 v25.4s, v0.8h, v3.h[2] -+ smlal2 v25.4s, v1.8h, v4.h[2] -+ smlal2 v25.4s, v2.8h, v5.h[2] -+ -+ // V -+ smull v30.4s, v0.4h, v3.h[4] -+ smlal v30.4s, v1.4h, v4.h[4] -+ smlal v30.4s, v2.4h, v5.h[4] -+ smull2 v31.4s, v0.8h, v3.h[4] -+ smlal2 v31.4s, v1.8h, v4.h[4] -+ smlal2 v31.4s, v2.8h, v5.h[4] -+ -+ ld3 {v0.16b, v1.16b, v2.16b}, [x10], #48 -+ -+ shrn v24.4h, v24.4s, #14 -+ shrn2 v24.8h, v25.4s, #14 -+ sqrshrn v24.8b, v24.8h, #1 -+ add v24.8b, v24.8b, v7.8b // +128 -+ shrn v30.4h, v30.4s, #14 -+ shrn2 v30.8h, v31.4s, #14 -+ sqrshrn v30.8b, v30.8h, #1 -+ add v30.8b, v30.8b, v7.8b // +128 -+ -+ subs w9, w9, #16 -+ -+ st1 {v26.16b}, [x11], #16 -+ st1 {v24.8b}, [x12], #8 -+ st1 {v30.8b}, [x13], #8 -+ -+ b.gt 10b -+ -+// -------------------- Even line tail - YUV -+// If width % 16 == 0 then simply runs once with preloaded RGB -+// If other then deals with preload & then does remaining tail -+ -+13: -+ // Body is simple copy of main loop body minus preload -+ -+ uxtl v16.8h, v0.8b -+ uxtl v17.8h, v1.8b -+ uxtl v18.8h, v2.8b -+ -+ uxtl2 v20.8h, v0.16b -+ uxtl2 v21.8h, v1.16b -+ uxtl2 v22.8h, v2.16b -+ -+ bic v0.8h, #0xff, LSL #8 -+ bic v1.8h, #0xff, LSL #8 -+ bic v2.8h, #0xff, LSL #8 -+ -+ // Y0 -+ smull v26.4s, v16.4h, v3.h[0] -+ smlal v26.4s, v17.4h, v4.h[0] -+ smlal v26.4s, v18.4h, v5.h[0] -+ smull2 v27.4s, v16.8h, v3.h[0] -+ smlal2 v27.4s, v17.8h, v4.h[0] -+ smlal2 v27.4s, v18.8h, v5.h[0] -+ // Y1 -+ smull v28.4s, v20.4h, v3.h[0] -+ smlal v28.4s, v21.4h, v4.h[0] -+ smlal v28.4s, v22.4h, v5.h[0] -+ smull2 v29.4s, v20.8h, v3.h[0] -+ smlal2 v29.4s, v21.8h, v4.h[0] -+ smlal2 v29.4s, v22.8h, v5.h[0] -+ shrn v26.4h, v26.4s, #12 -+ shrn2 v26.8h, v27.4s, #12 -+ add v26.8h, v26.8h, v6.8h // +128 (>> 3 = 16) -+ uqrshrn v26.8b, v26.8h, #3 -+ shrn v28.4h, v28.4s, #12 -+ shrn2 v28.8h, v29.4s, #12 -+ add v28.8h, v28.8h, v6.8h -+ uqrshrn2 v26.16b, v28.8h, #3 -+ // Y0/Y1 -+ -+ // U -+ // Vector subscript *2 as we loaded into S but are only 
using H -+ smull v24.4s, v0.4h, v3.h[2] -+ smlal v24.4s, v1.4h, v4.h[2] -+ smlal v24.4s, v2.4h, v5.h[2] -+ smull2 v25.4s, v0.8h, v3.h[2] -+ smlal2 v25.4s, v1.8h, v4.h[2] -+ smlal2 v25.4s, v2.8h, v5.h[2] -+ -+ // V -+ smull v30.4s, v0.4h, v3.h[4] -+ smlal v30.4s, v1.4h, v4.h[4] -+ smlal v30.4s, v2.4h, v5.h[4] -+ smull2 v31.4s, v0.8h, v3.h[4] -+ smlal2 v31.4s, v1.8h, v4.h[4] -+ smlal2 v31.4s, v2.8h, v5.h[4] -+ -+ cmp w9, #-16 -+ -+ shrn v24.4h, v24.4s, #14 -+ shrn2 v24.8h, v25.4s, #14 -+ sqrshrn v24.8b, v24.8h, #1 -+ add v24.8b, v24.8b, v7.8b // +128 -+ shrn v30.4h, v30.4s, #14 -+ shrn2 v30.8h, v31.4s, #14 -+ sqrshrn v30.8b, v30.8h, #1 -+ add v30.8b, v30.8b, v7.8b // +128 -+ -+ // Here: -+ // w9 == 0 width % 16 == 0, tail done -+ // w9 > -16 1st tail done (16 pels), remainder still to go -+ // w9 == -16 shouldn't happen -+ // w9 > -32 2nd tail done -+ // w9 <= -32 shouldn't happen -+ -+ b.lt 2f -+ st1 {v26.16b}, [x11], #16 -+ st1 {v24.8b}, [x12], #8 -+ st1 {v30.8b}, [x13], #8 -+ cbz w9, 3f -+ -+12: -+ sub w9, w9, #16 -+ -+ tbz w9, #3, 1f -+ ld3 {v0.8b, v1.8b, v2.8b}, [x10], #24 -+1: tbz w9, #2, 1f -+ ld3 {v0.b, v1.b, v2.b}[8], [x10], #3 -+ ld3 {v0.b, v1.b, v2.b}[9], [x10], #3 -+ ld3 {v0.b, v1.b, v2.b}[10], [x10], #3 -+ ld3 {v0.b, v1.b, v2.b}[11], [x10], #3 -+1: tbz w9, #1, 1f -+ ld3 {v0.b, v1.b, v2.b}[12], [x10], #3 -+ ld3 {v0.b, v1.b, v2.b}[13], [x10], #3 -+1: tbz w9, #0, 13b -+ ld3 {v0.b, v1.b, v2.b}[14], [x10], #3 -+ b 13b -+ -+2: -+ tbz w9, #3, 1f -+ st1 {v26.8b}, [x11], #8 -+ st1 {v24.s}[0], [x12], #4 -+ st1 {v30.s}[0], [x13], #4 -+1: tbz w9, #2, 1f -+ st1 {v26.s}[2], [x11], #4 -+ st1 {v24.h}[2], [x12], #2 -+ st1 {v30.h}[2], [x13], #2 -+1: tbz w9, #1, 1f -+ st1 {v26.h}[6], [x11], #2 -+ st1 {v24.b}[6], [x12], #1 -+ st1 {v30.b}[6], [x13], #1 -+1: tbz w9, #0, 1f -+ st1 {v26.b}[14], [x11] -+ st1 {v24.b}[7], [x12] -+ st1 {v30.b}[7], [x13] -+1: -+3: -+ -+// -------------------- Odd line body - Y only -+ -+ subs w5, w5, #1 -+ b.eq 90f -+ -+ subs w9, w4, #0 -+ add x0, x0, w14, SXTX -+ add x1, x1, w6, SXTX -+ mov x10, x0 -+ mov x11, x1 -+ b.lt 12f -+ -+ ld3 {v0.16b, v1.16b, v2.16b}, [x10], #48 -+ subs w9, w9, #16 -+ b.le 13f -+ -+10: -+ uxtl v16.8h, v0.8b -+ uxtl v17.8h, v1.8b -+ uxtl v18.8h, v2.8b -+ -+ uxtl2 v20.8h, v0.16b -+ uxtl2 v21.8h, v1.16b -+ uxtl2 v22.8h, v2.16b -+ -+ // Testing shows it is faster to stack the smull/smlal ops together -+ // rather than interleave them between channels and indeed even the -+ // shift/add sections seem happier not interleaved -+ -+ // Y0 -+ smull v26.4s, v16.4h, v3.h[0] -+ smlal v26.4s, v17.4h, v4.h[0] -+ smlal v26.4s, v18.4h, v5.h[0] -+ smull2 v27.4s, v16.8h, v3.h[0] -+ smlal2 v27.4s, v17.8h, v4.h[0] -+ smlal2 v27.4s, v18.8h, v5.h[0] -+ // Y1 -+ smull v28.4s, v20.4h, v3.h[0] -+ smlal v28.4s, v21.4h, v4.h[0] -+ smlal v28.4s, v22.4h, v5.h[0] -+ smull2 v29.4s, v20.8h, v3.h[0] -+ smlal2 v29.4s, v21.8h, v4.h[0] -+ smlal2 v29.4s, v22.8h, v5.h[0] -+ -+ ld3 {v0.16b, v1.16b, v2.16b}, [x10], #48 -+ -+ shrn v26.4h, v26.4s, #12 -+ shrn2 v26.8h, v27.4s, #12 -+ add v26.8h, v26.8h, v6.8h // +128 (>> 3 = 16) -+ uqrshrn v26.8b, v26.8h, #3 -+ shrn v28.4h, v28.4s, #12 -+ shrn2 v28.8h, v29.4s, #12 -+ add v28.8h, v28.8h, v6.8h -+ uqrshrn2 v26.16b, v28.8h, #3 -+ // Y0/Y1 -+ -+ subs w9, w9, #16 -+ -+ st1 {v26.16b}, [x11], #16 -+ -+ b.gt 10b -+ -+// -------------------- Odd line tail - Y -+// If width % 16 == 0 then simply runs once with preloaded RGB -+// If other then deals with preload & then does remaining tail -+ -+13: -+ // Body is simple copy of main loop body minus 
preload -+ -+ uxtl v16.8h, v0.8b -+ uxtl v17.8h, v1.8b -+ uxtl v18.8h, v2.8b -+ -+ uxtl2 v20.8h, v0.16b -+ uxtl2 v21.8h, v1.16b -+ uxtl2 v22.8h, v2.16b -+ -+ // Y0 -+ smull v26.4s, v16.4h, v3.h[0] -+ smlal v26.4s, v17.4h, v4.h[0] -+ smlal v26.4s, v18.4h, v5.h[0] -+ smull2 v27.4s, v16.8h, v3.h[0] -+ smlal2 v27.4s, v17.8h, v4.h[0] -+ smlal2 v27.4s, v18.8h, v5.h[0] -+ // Y1 -+ smull v28.4s, v20.4h, v3.h[0] -+ smlal v28.4s, v21.4h, v4.h[0] -+ smlal v28.4s, v22.4h, v5.h[0] -+ smull2 v29.4s, v20.8h, v3.h[0] -+ smlal2 v29.4s, v21.8h, v4.h[0] -+ smlal2 v29.4s, v22.8h, v5.h[0] -+ -+ cmp w9, #-16 -+ -+ shrn v26.4h, v26.4s, #12 -+ shrn2 v26.8h, v27.4s, #12 -+ add v26.8h, v26.8h, v6.8h // +128 (>> 3 = 16) -+ uqrshrn v26.8b, v26.8h, #3 -+ shrn v28.4h, v28.4s, #12 -+ shrn2 v28.8h, v29.4s, #12 -+ add v28.8h, v28.8h, v6.8h -+ uqrshrn2 v26.16b, v28.8h, #3 -+ // Y0/Y1 -+ -+ // Here: -+ // w9 == 0 width % 16 == 0, tail done -+ // w9 > -16 1st tail done (16 pels), remainder still to go -+ // w9 == -16 shouldn't happen -+ // w9 > -32 2nd tail done -+ // w9 <= -32 shouldn't happen -+ -+ b.lt 2f -+ st1 {v26.16b}, [x11], #16 -+ cbz w9, 3f -+ -+12: -+ sub w9, w9, #16 -+ -+ tbz w9, #3, 1f -+ ld3 {v0.8b, v1.8b, v2.8b}, [x10], #24 -+1: tbz w9, #2, 1f -+ ld3 {v0.b, v1.b, v2.b}[8], [x10], #3 -+ ld3 {v0.b, v1.b, v2.b}[9], [x10], #3 -+ ld3 {v0.b, v1.b, v2.b}[10], [x10], #3 -+ ld3 {v0.b, v1.b, v2.b}[11], [x10], #3 -+1: tbz w9, #1, 1f -+ ld3 {v0.b, v1.b, v2.b}[12], [x10], #3 -+ ld3 {v0.b, v1.b, v2.b}[13], [x10], #3 -+1: tbz w9, #0, 13b -+ ld3 {v0.b, v1.b, v2.b}[14], [x10], #3 -+ b 13b -+ -+2: -+ tbz w9, #3, 1f -+ st1 {v26.8b}, [x11], #8 -+1: tbz w9, #2, 1f -+ st1 {v26.s}[2], [x11], #4 -+1: tbz w9, #1, 1f -+ st1 {v26.h}[6], [x11], #2 -+1: tbz w9, #0, 1f -+ st1 {v26.b}[14], [x11] -+1: -+3: -+ -+// ------------------- Loop to start -+ -+ add x0, x0, w14, SXTX -+ add x1, x1, w6, SXTX -+ add x2, x2, w7, SXTX -+ add x3, x3, w7, SXTX -+ subs w5, w5, #1 -+ b.gt 11b -+90: -+ ret -+endfunc -diff --git a/libswscale/aarch64/yuv2rgb_neon.S b/libswscale/aarch64/yuv2rgb_neon.S -index f341268c5d..f4b220fb60 100644 ---- a/libswscale/aarch64/yuv2rgb_neon.S -+++ b/libswscale/aarch64/yuv2rgb_neon.S -@@ -118,8 +118,8 @@ - .endm - - .macro increment_yuv422p -- add x6, x6, w7, SXTW // srcU += incU -- add x13, x13, w14, SXTW // srcV += incV -+ add x6, x6, w7, UXTW // srcU += incU -+ add x13, x13, w14, UXTW // srcV += incV - .endm - - .macro compute_rgba r1 g1 b1 a1 r2 g2 b2 a2 -@@ -189,8 +189,8 @@ function ff_\ifmt\()_to_\ofmt\()_neon, export=1 - st4 {v16.8B,v17.8B,v18.8B,v19.8B}, [x2], #32 - subs w8, w8, #16 // width -= 16 - b.gt 2b -- add x2, x2, w3, SXTW // dst += padding -- add x4, x4, w5, SXTW // srcY += paddingY -+ add x2, x2, w3, UXTW // dst += padding -+ add x4, x4, w5, UXTW // srcY += paddingY - increment_\ifmt - subs w1, w1, #1 // height -= 1 - b.gt 1b + int av_timecode_check_frame_rate(AVRational rate) diff --git a/libswscale/input.c b/libswscale/input.c index 197152f65b..6850801a44 100644 --- a/libswscale/input.c @@ -74825,9 +74791,32 @@ index 197152f65b..6850801a44 100644 dstU[i] = (ru*r + gu*g + bu*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT; dstV[i] = (rv*r + gv*g + bv*b + (0x10001 << (RGB2YUV_SHIFT - 1))) >> RGB2YUV_SHIFT; diff --git a/libswscale/output.c b/libswscale/output.c -index aef0e7f82a..e855ad606a 100644 +index b04f5a6549..e855ad606a 100644 --- a/libswscale/output.c +++ b/libswscale/output.c +@@ -1002,8 +1002,8 @@ yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, + + for (i = 0; i < ((dstW + 1) 
>> 1); i++) { + int j; +- unsigned Y1 = -0x40000000; +- unsigned Y2 = -0x40000000; ++ int Y1 = -0x40000000; ++ int Y2 = -0x40000000; + int U = -(128 << 23); // 19 + int V = -(128 << 23); + int R, G, B; +@@ -1031,9 +1031,9 @@ yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, + } + + // 8 bits: 12+15=27; 16 bits: 12+19=31 +- Y1 = (int)Y1 >> 14; // 10 ++ Y1 >>= 14; // 10 + Y1 += 0x10000; +- Y2 = (int)Y2 >> 14; ++ Y2 >>= 14; + Y2 += 0x10000; + U >>= 14; + V >>= 14; @@ -1043,8 +1043,8 @@ yuv2rgba64_X_c_template(SwsContext *c, const int16_t *lumFilter, Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; @@ -74843,26 +74832,26 @@ index aef0e7f82a..e855ad606a 100644 B = U * c->yuv2rgb_u2b_coeff; // 8 bits: 30 - 22 = 8 bits, 16 bits: 30 bits - 14 = 16 bits -- output_pixel(&dest[0], av_clip_uintp2(((R_B + Y1) >> 14) + (1<<15), 16)); -- output_pixel(&dest[1], av_clip_uintp2((( G + Y1) >> 14) + (1<<15), 16)); -- output_pixel(&dest[2], av_clip_uintp2(((B_R + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y1) >> 14) + (1<<15), 16)); + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14); -- output_pixel(&dest[4], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[5], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[6], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[4], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[5], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[6], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16)); + output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14); output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14); dest += 8; } else { -- output_pixel(&dest[3], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[4], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[5], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[3], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[4], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[5], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16)); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); @@ -74910,7 +74899,18 @@ index aef0e7f82a..e855ad606a 100644 dest += 6; } } -@@ -1158,8 +1158,8 @@ yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, +@@ -1148,8 +1148,8 @@ yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, + + if (uvalpha < 2048) { + for (i = 0; i < ((dstW + 1) >> 1); i++) { +- SUINT Y1 = (buf0[i * 2] ) >> 2; +- SUINT Y2 = (buf0[i * 2 + 1]) >> 2; ++ int Y1 = (buf0[i * 2] ) >> 2; ++ int Y2 = (buf0[i * 2 + 1]) >> 2; + int U = (ubuf0[i] - (128 << 11)) >> 2; + int V = (vbuf0[i] - (128 << 11)) >> 2; + int R, G, B; +@@ -1158,12 +1158,12 @@ yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, Y2 -= 
c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; @@ -74920,38 +74920,55 @@ index aef0e7f82a..e855ad606a 100644 + Y2 += 1 << 13; if (hasAlpha) { - A1 = abuf0[i * 2 ] << 11; +- A1 = abuf0[i * 2 ] * (1 << 11); +- A2 = abuf0[i * 2 + 1] * (1 << 11); ++ A1 = abuf0[i * 2 ] << 11; ++ A2 = abuf0[i * 2 + 1] << 11; + + A1 += 1 << 13; + A2 += 1 << 13; @@ -1173,20 +1173,20 @@ yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; -- output_pixel(&dest[0], av_clip_uintp2(((R_B + Y1) >> 14) + (1<<15), 16)); -- output_pixel(&dest[1], av_clip_uintp2((( G + Y1) >> 14) + (1<<15), 16)); -- output_pixel(&dest[2], av_clip_uintp2(((B_R + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y1) >> 14) + (1<<15), 16)); + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14); -- output_pixel(&dest[4], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[5], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[6], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[4], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[5], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[6], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16)); + output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14); output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14); dest += 8; } else { -- output_pixel(&dest[3], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[4], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[5], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[3], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[4], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[5], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16)); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); dest += 6; } } -@@ -1204,8 +1204,8 @@ yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, +@@ -1194,8 +1194,8 @@ yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, + const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1]; + int A1 = 0xffff<<14, A2 = 0xffff<<14; + for (i = 0; i < ((dstW + 1) >> 1); i++) { +- SUINT Y1 = (buf0[i * 2] ) >> 2; +- SUINT Y2 = (buf0[i * 2 + 1]) >> 2; ++ int Y1 = (buf0[i * 2] ) >> 2; ++ int Y2 = (buf0[i * 2 + 1]) >> 2; + int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3; + int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3; + int R, G, B; +@@ -1204,12 +1204,12 @@ yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, Y2 -= c->yuv2rgb_y_offset; Y1 *= c->yuv2rgb_y_coeff; Y2 *= c->yuv2rgb_y_coeff; @@ -74961,31 +74978,37 @@ index aef0e7f82a..e855ad606a 100644 + Y2 += 1 << 13; if (hasAlpha) { - A1 = abuf0[i * 2 ] << 11; +- A1 = abuf0[i * 2 ] * 
(1 << 11); +- A2 = abuf0[i * 2 + 1] * (1 << 11); ++ A1 = abuf0[i * 2 ] << 11; ++ A2 = abuf0[i * 2 + 1] << 11; + + A1 += 1 << 13; + A2 += 1 << 13; @@ -1219,20 +1219,20 @@ yuv2rgba64_1_c_template(SwsContext *c, const int32_t *buf0, G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; -- output_pixel(&dest[0], av_clip_uintp2(((R_B + Y1) >> 14) + (1<<15), 16)); -- output_pixel(&dest[1], av_clip_uintp2((( G + Y1) >> 14) + (1<<15), 16)); -- output_pixel(&dest[2], av_clip_uintp2(((B_R + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y1) >> 14) + (1<<15), 16)); +- output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y1) >> 14) + (1<<15), 16)); + output_pixel(&dest[0], av_clip_uintp2(R_B + Y1, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y1, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y1, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A1 , 30) >> 14); -- output_pixel(&dest[4], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[5], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[6], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[4], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[5], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[6], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16)); + output_pixel(&dest[4], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[6], av_clip_uintp2(B_R + Y2, 30) >> 14); output_pixel(&dest[7], av_clip_uintp2(A2 , 30) >> 14); dest += 8; } else { -- output_pixel(&dest[3], av_clip_uintp2(((R_B + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[4], av_clip_uintp2((( G + Y2) >> 14) + (1<<15), 16)); -- output_pixel(&dest[5], av_clip_uintp2(((B_R + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[3], av_clip_uintp2(((int)(R_B + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[4], av_clip_uintp2(((int)( G + Y2) >> 14) + (1<<15), 16)); +- output_pixel(&dest[5], av_clip_uintp2(((int)(B_R + Y2) >> 14) + (1<<15), 16)); + output_pixel(&dest[3], av_clip_uintp2(R_B + Y2, 30) >> 14); + output_pixel(&dest[4], av_clip_uintp2( G + Y2, 30) >> 14); + output_pixel(&dest[5], av_clip_uintp2(B_R + Y2, 30) >> 14); @@ -75036,7 +75059,15 @@ index aef0e7f82a..e855ad606a 100644 if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14); dest += 4; -@@ -1374,7 +1374,7 @@ yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, +@@ -1367,17 +1367,17 @@ yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, + + if (uvalpha < 2048) { + for (i = 0; i < dstW; i++) { +- SUINT Y = (buf0[i]) >> 2; ++ int Y = (buf0[i]) >> 2; + int U = (ubuf0[i] - (128 << 11)) >> 2; + int V = (vbuf0[i] - (128 << 11)) >> 2; + int R, G, B; Y -= c->yuv2rgb_y_offset; Y *= c->yuv2rgb_y_coeff; @@ -75044,21 +75075,33 @@ index aef0e7f82a..e855ad606a 100644 + Y += 1 << 13; if (hasAlpha) { - A = abuf0[i] << 11; +- A = abuf0[i] * (1 << 11); ++ A = abuf0[i] << 11; + + A += 1 << 13; + } @@ -1386,9 +1386,9 @@ yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; -- output_pixel(&dest[0], av_clip_uintp2(((R_B + Y) >> 14) + (1<<15), 16)); -- output_pixel(&dest[1], av_clip_uintp2((( G + Y) >> 14) + (1<<15), 16)); -- 
output_pixel(&dest[2], av_clip_uintp2(((B_R + Y) >> 14) + (1<<15), 16)); +- output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y) >> 14) + (1<<15), 16)); +- output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y) >> 14) + (1<<15), 16)); +- output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y) >> 14) + (1<<15), 16)); + output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14); dest += 4; -@@ -1407,7 +1407,7 @@ yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, +@@ -1400,17 +1400,17 @@ yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, + const int32_t *ubuf1 = ubuf[1], *vbuf1 = vbuf[1]; + int A = 0xffff<<14; + for (i = 0; i < dstW; i++) { +- SUINT Y = (buf0[i] ) >> 2; ++ int Y = (buf0[i] ) >> 2; + int U = (ubuf0[i] + ubuf1[i] - (128 << 12)) >> 3; + int V = (vbuf0[i] + vbuf1[i] - (128 << 12)) >> 3; + int R, G, B; Y -= c->yuv2rgb_y_offset; Y *= c->yuv2rgb_y_coeff; @@ -75066,657 +75109,101 @@ index aef0e7f82a..e855ad606a 100644 + Y += 1 << 13; if (hasAlpha) { - A = abuf0[i] << 11; +- A = abuf0[i] * (1 << 11); ++ A = abuf0[i] << 11; + + A += 1 << 13; + } @@ -1419,9 +1419,9 @@ yuv2rgba64_full_1_c_template(SwsContext *c, const int32_t *buf0, G = V * c->yuv2rgb_v2g_coeff + U * c->yuv2rgb_u2g_coeff; B = U * c->yuv2rgb_u2b_coeff; -- output_pixel(&dest[0], av_clip_uintp2(((R_B + Y) >> 14) + (1<<15), 16)); -- output_pixel(&dest[1], av_clip_uintp2((( G + Y) >> 14) + (1<<15), 16)); -- output_pixel(&dest[2], av_clip_uintp2(((B_R + Y) >> 14) + (1<<15), 16)); +- output_pixel(&dest[0], av_clip_uintp2(((int)(R_B + Y) >> 14) + (1<<15), 16)); +- output_pixel(&dest[1], av_clip_uintp2(((int)( G + Y) >> 14) + (1<<15), 16)); +- output_pixel(&dest[2], av_clip_uintp2(((int)(B_R + Y) >> 14) + (1<<15), 16)); + output_pixel(&dest[0], av_clip_uintp2(R_B + Y, 30) >> 14); + output_pixel(&dest[1], av_clip_uintp2( G + Y, 30) >> 14); + output_pixel(&dest[2], av_clip_uintp2(B_R + Y, 30) >> 14); if (eightbytes) { output_pixel(&dest[3], av_clip_uintp2(A, 30) >> 14); dest += 4; -diff --git a/libswscale/rgb2rgb.c b/libswscale/rgb2rgb.c -index a7300f3ba4..ba1db155b0 100644 ---- a/libswscale/rgb2rgb.c -+++ b/libswscale/rgb2rgb.c -@@ -83,6 +83,31 @@ void (*ff_rgb24toyv12)(const uint8_t *src, uint8_t *ydst, - int width, int height, - int lumStride, int chromStride, int srcStride, - int32_t *rgb2yuv); -+void (*ff_bgr24toyv12)(const uint8_t *src, uint8_t *ydst, -+ uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); -+void (*ff_rgbxtoyv12)(const uint8_t *src, uint8_t *ydst, -+ uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); -+void (*ff_bgrxtoyv12)(const uint8_t *src, uint8_t *ydst, -+ uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); -+void (*ff_xrgbtoyv12)(const uint8_t *src, uint8_t *ydst, -+ uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); -+void (*ff_xbgrtoyv12)(const uint8_t *src, uint8_t *ydst, -+ uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); - void (*planar2x)(const uint8_t *src, uint8_t *dst, int width, int height, - int 
srcStride, int dstStride); - void (*interleaveBytes)(const uint8_t *src1, const uint8_t *src2, uint8_t *dst, -diff --git a/libswscale/rgb2rgb.h b/libswscale/rgb2rgb.h -index 48bba1586a..6329533f18 100644 ---- a/libswscale/rgb2rgb.h -+++ b/libswscale/rgb2rgb.h -@@ -82,6 +82,9 @@ void rgb12to15(const uint8_t *src, uint8_t *dst, int src_size); - void ff_rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, - uint8_t *vdst, int width, int height, int lumStride, - int chromStride, int srcStride, int32_t *rgb2yuv); -+void ff_bgr24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv); +@@ -1860,9 +1860,9 @@ static av_always_inline void yuv2rgb_write_full(SwsContext *c, + Y -= c->yuv2rgb_y_offset; + Y *= c->yuv2rgb_y_coeff; + Y += 1 << 21; +- R = (unsigned)Y + V*(unsigned)c->yuv2rgb_v2r_coeff; +- G = (unsigned)Y + V*(unsigned)c->yuv2rgb_v2g_coeff + U*(unsigned)c->yuv2rgb_u2g_coeff; +- B = (unsigned)Y + U*(unsigned)c->yuv2rgb_u2b_coeff; ++ R = (unsigned)Y + V*c->yuv2rgb_v2r_coeff; ++ G = (unsigned)Y + V*c->yuv2rgb_v2g_coeff + U*c->yuv2rgb_u2g_coeff; ++ B = (unsigned)Y + U*c->yuv2rgb_u2b_coeff; + if ((R | G | B) & 0xC0000000) { + R = av_clip_uintp2(R, 30); + G = av_clip_uintp2(G, 30); +diff --git a/libswscale/swscale.c b/libswscale/swscale.c +index 1b146b23ab..12160a169a 100644 +--- a/libswscale/swscale.c ++++ b/libswscale/swscale.c +@@ -770,8 +770,7 @@ int attribute_align_arg sws_scale(struct SwsContext *c, - /** - * Height should be a multiple of 2 and width should be a multiple of 16. -@@ -131,6 +134,26 @@ extern void (*ff_rgb24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, - int width, int height, - int lumStride, int chromStride, int srcStride, - int32_t *rgb2yuv); -+extern void (*ff_bgr24toyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); -+extern void (*ff_rgbxtoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); -+extern void (*ff_bgrxtoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); -+extern void (*ff_xrgbtoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); -+extern void (*ff_xbgrtoyv12)(const uint8_t *src, uint8_t *ydst, uint8_t *udst, uint8_t *vdst, -+ int width, int height, -+ int lumStride, int chromStride, int srcStride, -+ int32_t *rgb2yuv); - extern void (*planar2x)(const uint8_t *src, uint8_t *dst, int width, int height, - int srcStride, int dstStride); - -diff --git a/libswscale/rgb2rgb_template.c b/libswscale/rgb2rgb_template.c -index 42c69801ba..e711589e1e 100644 ---- a/libswscale/rgb2rgb_template.c -+++ b/libswscale/rgb2rgb_template.c -@@ -646,13 +646,14 @@ static inline void uyvytoyv12_c(const uint8_t *src, uint8_t *ydst, - * others are ignored in the C version. - * FIXME: Write HQ version. 
- */ --void ff_rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+static void rgb24toyv12_x(const uint8_t *src, uint8_t *ydst, uint8_t *udst, - uint8_t *vdst, int width, int height, int lumStride, -- int chromStride, int srcStride, int32_t *rgb2yuv) -+ int chromStride, int srcStride, int32_t *rgb2yuv, -+ const uint8_t x[9]) - { -- int32_t ry = rgb2yuv[RY_IDX], gy = rgb2yuv[GY_IDX], by = rgb2yuv[BY_IDX]; -- int32_t ru = rgb2yuv[RU_IDX], gu = rgb2yuv[GU_IDX], bu = rgb2yuv[BU_IDX]; -- int32_t rv = rgb2yuv[RV_IDX], gv = rgb2yuv[GV_IDX], bv = rgb2yuv[BV_IDX]; -+ int32_t ry = rgb2yuv[x[0]], gy = rgb2yuv[x[1]], by = rgb2yuv[x[2]]; -+ int32_t ru = rgb2yuv[x[3]], gu = rgb2yuv[x[4]], bu = rgb2yuv[x[5]]; -+ int32_t rv = rgb2yuv[x[6]], gv = rgb2yuv[x[7]], bv = rgb2yuv[x[8]]; - int y; - const int chromWidth = width >> 1; - -@@ -678,6 +679,19 @@ void ff_rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, - Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; - ydst[2 * i + 1] = Y; - } -+ if ((width & 1) != 0) { -+ unsigned int b = src[6 * i + 0]; -+ unsigned int g = src[6 * i + 1]; -+ unsigned int r = src[6 * i + 2]; -+ -+ unsigned int Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; -+ unsigned int V = ((rv * r + gv * g + bv * b) >> RGB2YUV_SHIFT) + 128; -+ unsigned int U = ((ru * r + gu * g + bu * b) >> RGB2YUV_SHIFT) + 128; -+ -+ udst[i] = U; -+ vdst[i] = V; -+ ydst[2 * i] = Y; -+ } - ydst += lumStride; - src += srcStride; - -@@ -700,6 +714,125 @@ void ff_rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, - Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; - ydst[2 * i + 1] = Y; - } -+ if ((width & 1) != 0) { -+ unsigned int b = src[6 * i + 0]; -+ unsigned int g = src[6 * i + 1]; -+ unsigned int r = src[6 * i + 2]; -+ -+ unsigned int Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; -+ -+ ydst[2 * i] = Y; -+ } -+ udst += chromStride; -+ vdst += chromStride; -+ ydst += lumStride; -+ src += srcStride; -+ } -+} -+ -+static const uint8_t x_rgb[9] = { -+ RY_IDX, GY_IDX, BY_IDX, -+ RU_IDX, GU_IDX, BU_IDX, -+ RV_IDX, GV_IDX, BV_IDX, -+}; -+ -+static const uint8_t x_bgr[9] = { -+ BY_IDX, GY_IDX, RY_IDX, -+ BU_IDX, GU_IDX, RU_IDX, -+ BV_IDX, GV_IDX, RV_IDX, -+}; -+ -+void ff_rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv) -+{ -+ rgb24toyv12_x(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride, rgb2yuv, x_rgb); -+} -+ -+void ff_bgr24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv) -+{ -+ rgb24toyv12_x(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride, rgb2yuv, x_bgr); -+} -+ -+static void rgbxtoyv12_x(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv, -+ const uint8_t x[9]) -+{ -+ int32_t ry = rgb2yuv[x[0]], gy = rgb2yuv[x[1]], by = rgb2yuv[x[2]]; -+ int32_t ru = rgb2yuv[x[3]], gu = rgb2yuv[x[4]], bu = rgb2yuv[x[5]]; -+ int32_t rv = rgb2yuv[x[6]], gv = rgb2yuv[x[7]], bv = rgb2yuv[x[8]]; -+ int y; -+ const int chromWidth = width >> 1; -+ -+ for (y = 0; y < height; y += 2) { -+ int i; -+ for (i = 0; i < chromWidth; i++) { -+ unsigned int b = src[8 * i + 2]; -+ unsigned int g = src[8 * i + 1]; -+ unsigned int r = src[8 * i + 0]; -+ -+ unsigned int Y = ((ry * r + gy 
* g + by * b) >> RGB2YUV_SHIFT) + 16; -+ unsigned int V = ((rv * r + gv * g + bv * b) >> RGB2YUV_SHIFT) + 128; -+ unsigned int U = ((ru * r + gu * g + bu * b) >> RGB2YUV_SHIFT) + 128; -+ -+ udst[i] = U; -+ vdst[i] = V; -+ ydst[2 * i] = Y; -+ -+ b = src[8 * i + 6]; -+ g = src[8 * i + 5]; -+ r = src[8 * i + 4]; -+ -+ Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; -+ ydst[2 * i + 1] = Y; -+ } -+ if ((width & 1) != 0) { -+ unsigned int b = src[8 * i + 2]; -+ unsigned int g = src[8 * i + 1]; -+ unsigned int r = src[8 * i + 0]; -+ -+ unsigned int Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; -+ unsigned int V = ((rv * r + gv * g + bv * b) >> RGB2YUV_SHIFT) + 128; -+ unsigned int U = ((ru * r + gu * g + bu * b) >> RGB2YUV_SHIFT) + 128; -+ -+ udst[i] = U; -+ vdst[i] = V; -+ ydst[2 * i] = Y; -+ } -+ ydst += lumStride; -+ src += srcStride; -+ -+ if (y+1 == height) -+ break; -+ -+ for (i = 0; i < chromWidth; i++) { -+ unsigned int b = src[8 * i + 2]; -+ unsigned int g = src[8 * i + 1]; -+ unsigned int r = src[8 * i + 0]; -+ -+ unsigned int Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; -+ -+ ydst[2 * i] = Y; -+ -+ b = src[8 * i + 6]; -+ g = src[8 * i + 5]; -+ r = src[8 * i + 4]; -+ -+ Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; -+ ydst[2 * i + 1] = Y; -+ } -+ if ((width & 1) != 0) { -+ unsigned int b = src[8 * i + 2]; -+ unsigned int g = src[8 * i + 1]; -+ unsigned int r = src[8 * i + 0]; -+ -+ unsigned int Y = ((ry * r + gy * g + by * b) >> RGB2YUV_SHIFT) + 16; -+ -+ ydst[2 * i] = Y; -+ } - udst += chromStride; - vdst += chromStride; - ydst += lumStride; -@@ -707,6 +840,37 @@ void ff_rgb24toyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, + if ((srcSliceY & (macro_height-1)) || + ((srcSliceH& (macro_height-1)) && srcSliceY + srcSliceH != c->srcH) || +- srcSliceY + srcSliceH > c->srcH || +- (isBayer(c->srcFormat) && srcSliceH <= 1)) { ++ srcSliceY + srcSliceH > c->srcH) { + av_log(c, AV_LOG_ERROR, "Slice parameters %d, %d are invalid\n", srcSliceY, srcSliceH); + return AVERROR(EINVAL); } - } - -+static void ff_rgbxtoyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv) -+{ -+ rgbxtoyv12_x(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride, rgb2yuv, x_rgb); -+} -+ -+static void ff_bgrxtoyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv) -+{ -+ rgbxtoyv12_x(src, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride, rgb2yuv, x_bgr); -+} -+ -+// As the general code does no SIMD-like ops simply adding 1 to the src address -+// will fix the ignored alpha position -+static void ff_xrgbtoyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv) -+{ -+ rgbxtoyv12_x(src + 1, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride, rgb2yuv, x_rgb); -+} -+ -+static void ff_xbgrtoyv12_c(const uint8_t *src, uint8_t *ydst, uint8_t *udst, -+ uint8_t *vdst, int width, int height, int lumStride, -+ int chromStride, int srcStride, int32_t *rgb2yuv) -+{ -+ rgbxtoyv12_x(src + 1, ydst, udst, vdst, width, height, lumStride, chromStride, srcStride, rgb2yuv, x_bgr); -+} -+ -+ - static void interleaveBytes_c(const uint8_t *src1, const uint8_t *src2, - uint8_t *dest, int width, int height, - int 
src1Stride, int src2Stride, int dstStride) -@@ -980,6 +1144,11 @@ static av_cold void rgb2rgb_init_c(void) - yuy2toyv12 = yuy2toyv12_c; - planar2x = planar2x_c; - ff_rgb24toyv12 = ff_rgb24toyv12_c; -+ ff_bgr24toyv12 = ff_bgr24toyv12_c; -+ ff_rgbxtoyv12 = ff_rgbxtoyv12_c; -+ ff_bgrxtoyv12 = ff_bgrxtoyv12_c; -+ ff_xrgbtoyv12 = ff_xrgbtoyv12_c; -+ ff_xbgrtoyv12 = ff_xbgrtoyv12_c; - interleaveBytes = interleaveBytes_c; - deinterleaveBytes = deinterleaveBytes_c; - vu9_to_vu12 = vu9_to_vu12_c; -diff --git a/libswscale/swscale_unscaled.c b/libswscale/swscale_unscaled.c -index c4dd8a4d83..da38d7f8ac 100644 ---- a/libswscale/swscale_unscaled.c -+++ b/libswscale/swscale_unscaled.c -@@ -1655,6 +1655,91 @@ static int bgr24ToYv12Wrapper(SwsContext *c, const uint8_t *src[], - return srcSliceH; - } - -+static int rgb24ToYv12Wrapper(SwsContext *c, const uint8_t *src[], -+ int srcStride[], int srcSliceY, int srcSliceH, -+ uint8_t *dst[], int dstStride[]) -+{ -+ ff_bgr24toyv12( -+ src[0], -+ dst[0] + srcSliceY * dstStride[0], -+ dst[1] + (srcSliceY >> 1) * dstStride[1], -+ dst[2] + (srcSliceY >> 1) * dstStride[2], -+ c->srcW, srcSliceH, -+ dstStride[0], dstStride[1], srcStride[0], -+ c->input_rgb2yuv_table); -+ if (dst[3]) -+ fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); -+ return srcSliceH; -+} -+ -+static int bgrxToYv12Wrapper(SwsContext *c, const uint8_t *src[], -+ int srcStride[], int srcSliceY, int srcSliceH, -+ uint8_t *dst[], int dstStride[]) -+{ -+ ff_bgrxtoyv12( -+ src[0], -+ dst[0] + srcSliceY * dstStride[0], -+ dst[1] + (srcSliceY >> 1) * dstStride[1], -+ dst[2] + (srcSliceY >> 1) * dstStride[2], -+ c->srcW, srcSliceH, -+ dstStride[0], dstStride[1], srcStride[0], -+ c->input_rgb2yuv_table); -+ if (dst[3]) -+ fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); -+ return srcSliceH; -+} -+ -+static int rgbxToYv12Wrapper(SwsContext *c, const uint8_t *src[], -+ int srcStride[], int srcSliceY, int srcSliceH, -+ uint8_t *dst[], int dstStride[]) -+{ -+ ff_rgbxtoyv12( -+ src[0], -+ dst[0] + srcSliceY * dstStride[0], -+ dst[1] + (srcSliceY >> 1) * dstStride[1], -+ dst[2] + (srcSliceY >> 1) * dstStride[2], -+ c->srcW, srcSliceH, -+ dstStride[0], dstStride[1], srcStride[0], -+ c->input_rgb2yuv_table); -+ if (dst[3]) -+ fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); -+ return srcSliceH; -+} -+ -+static int xbgrToYv12Wrapper(SwsContext *c, const uint8_t *src[], -+ int srcStride[], int srcSliceY, int srcSliceH, -+ uint8_t *dst[], int dstStride[]) -+{ -+ ff_xbgrtoyv12( -+ src[0], -+ dst[0] + srcSliceY * dstStride[0], -+ dst[1] + (srcSliceY >> 1) * dstStride[1], -+ dst[2] + (srcSliceY >> 1) * dstStride[2], -+ c->srcW, srcSliceH, -+ dstStride[0], dstStride[1], srcStride[0], -+ c->input_rgb2yuv_table); -+ if (dst[3]) -+ fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); -+ return srcSliceH; -+} -+ -+static int xrgbToYv12Wrapper(SwsContext *c, const uint8_t *src[], -+ int srcStride[], int srcSliceY, int srcSliceH, -+ uint8_t *dst[], int dstStride[]) -+{ -+ ff_xrgbtoyv12( -+ src[0], -+ dst[0] + srcSliceY * dstStride[0], -+ dst[1] + (srcSliceY >> 1) * dstStride[1], -+ dst[2] + (srcSliceY >> 1) * dstStride[2], -+ c->srcW, srcSliceH, -+ dstStride[0], dstStride[1], srcStride[0], -+ c->input_rgb2yuv_table); -+ if (dst[3]) -+ fillPlane(dst[3], dstStride[3], c->srcW, srcSliceH, srcSliceY, 255); -+ return srcSliceH; -+} -+ - static int yvu9ToYv12Wrapper(SwsContext *c, const uint8_t *src[], - int srcStride[], int srcSliceY, int srcSliceH, - 
uint8_t *dst[], int dstStride[]) -@@ -2035,6 +2120,32 @@ void ff_get_unscaled_swscale(SwsContext *c) - (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P) && - !(flags & SWS_ACCURATE_RND)) - c->swscale = bgr24ToYv12Wrapper; -+ /* rgb24toYV12 */ -+ if (srcFormat == AV_PIX_FMT_RGB24 && -+ (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P) && -+ !(flags & SWS_ACCURATE_RND)) -+ c->swscale = rgb24ToYv12Wrapper; -+ -+ /* bgrxtoYV12 */ -+ if (((srcFormat == AV_PIX_FMT_BGRA && dstFormat == AV_PIX_FMT_YUV420P) || -+ (srcFormat == AV_PIX_FMT_BGR0 && (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P))) && -+ !(flags & SWS_ACCURATE_RND)) -+ c->swscale = bgrxToYv12Wrapper; -+ /* rgbx24toYV12 */ -+ if (((srcFormat == AV_PIX_FMT_RGBA && dstFormat == AV_PIX_FMT_YUV420P) || -+ (srcFormat == AV_PIX_FMT_RGB0 && (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P))) && -+ !(flags & SWS_ACCURATE_RND)) -+ c->swscale = rgbxToYv12Wrapper; -+ /* xbgrtoYV12 */ -+ if (((srcFormat == AV_PIX_FMT_ABGR && dstFormat == AV_PIX_FMT_YUV420P) || -+ (srcFormat == AV_PIX_FMT_0BGR && (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P))) && -+ !(flags & SWS_ACCURATE_RND)) -+ c->swscale = xbgrToYv12Wrapper; -+ /* xrgb24toYV12 */ -+ if (((srcFormat == AV_PIX_FMT_ARGB && dstFormat == AV_PIX_FMT_YUV420P) || -+ (srcFormat == AV_PIX_FMT_0RGB && (dstFormat == AV_PIX_FMT_YUV420P || dstFormat == AV_PIX_FMT_YUVA420P))) && -+ !(flags & SWS_ACCURATE_RND)) -+ c->swscale = xrgbToYv12Wrapper; - - /* RGB/BGR -> RGB/BGR (no dither needed forms) */ - if (isAnyRGB(srcFormat) && isAnyRGB(dstFormat) && findRgbConvFn(c) -diff --git a/libswscale/tests/swscale.c b/libswscale/tests/swscale.c -index 6c38041ddb..12776ffec7 100644 ---- a/libswscale/tests/swscale.c -+++ b/libswscale/tests/swscale.c -@@ -23,6 +23,7 @@ - #include - #include - #include -+#include - - #undef HAVE_AV_CONFIG_H - #include "libavutil/cpu.h" -@@ -78,6 +79,15 @@ struct Results { - uint32_t crc; - }; - -+static int time_rep = 0; -+ -+static uint64_t utime(void) -+{ -+ struct timespec ts; -+ clock_gettime(CLOCK_MONOTONIC, &ts); -+ return ts.tv_nsec / 1000 + (uint64_t)ts.tv_sec * 1000000; -+} -+ - // test by ref -> src -> dst -> out & compare out against ref - // ref & out are YV12 - static int doTest(const uint8_t * const ref[4], int refStride[4], int w, int h, -@@ -174,7 +184,7 @@ static int doTest(const uint8_t * const ref[4], int refStride[4], int w, int h, - goto end; - } - -- printf(" %s %dx%d -> %s %3dx%3d flags=%2d", -+ printf(" %s %4dx%4d -> %s %4dx%4d flags=%2d", - desc_src->name, srcW, srcH, - desc_dst->name, dstW, dstH, - flags); -@@ -182,6 +192,17 @@ static int doTest(const uint8_t * const ref[4], int refStride[4], int w, int h, - - sws_scale(dstContext, (const uint8_t * const*)src, srcStride, 0, srcH, dst, dstStride); - -+ if (time_rep != 0) -+ { -+ const uint64_t now = utime(); -+ uint64_t done; -+ for (i = 1; i != time_rep; ++i) { -+ sws_scale(dstContext, (const uint8_t * const*)src, srcStride, 0, srcH, dst, dstStride); -+ } -+ done = utime(); -+ printf(" T=%7"PRId64"us ", done-now); -+ } -+ - for (i = 0; i < 4 && dstStride[i]; i++) - crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), crc, dst[i], - dstStride[i] * dstH); -@@ -355,56 +376,78 @@ static int fileTest(const uint8_t * const ref[4], int refStride[4], - return 0; - } - --#define W 96 --#define H 96 -- - int main(int argc, char **argv) - { -+ unsigned int W = 96; -+ unsigned int H = 96; -+ unsigned int W2; -+ unsigned int H2; 
-+ unsigned int S; - enum AVPixelFormat srcFormat = AV_PIX_FMT_NONE; - enum AVPixelFormat dstFormat = AV_PIX_FMT_NONE; -- uint8_t *rgb_data = av_malloc(W * H * 4); -- const uint8_t * const rgb_src[4] = { rgb_data, NULL, NULL, NULL }; -- int rgb_stride[4] = { 4 * W, 0, 0, 0 }; -- uint8_t *data = av_malloc(4 * W * H); -- const uint8_t * const src[4] = { data, data + W * H, data + W * H * 2, data + W * H * 3 }; -- int stride[4] = { W, W, W, W }; - int x, y; - struct SwsContext *sws; - AVLFG rand; - int res = -1; - int i; - FILE *fp = NULL; -- -- if (!rgb_data || !data) -- return -1; -+ uint8_t *rgb_data; -+ uint8_t * rgb_src[4] = { NULL }; -+ int rgb_stride[4] = { 0 }; -+ uint8_t *data; -+ uint8_t * src[4] = { NULL }; -+ int stride[4] = { 0 }; - - for (i = 1; i < argc; i += 2) { -+ const char * const arg2 = argv[i+1]; -+ - if (argv[i][0] != '-' || i + 1 == argc) - goto bad_option; - if (!strcmp(argv[i], "-ref")) { -- fp = fopen(argv[i + 1], "r"); -+ fp = fopen(arg2, "r"); - if (!fp) { -- fprintf(stderr, "could not open '%s'\n", argv[i + 1]); -+ fprintf(stderr, "could not open '%s'\n", arg2); - goto error; +diff --git a/libswscale/utils.c b/libswscale/utils.c +index a5702922b8..352a8ed116 100644 +--- a/libswscale/utils.c ++++ b/libswscale/utils.c +@@ -512,7 +512,7 @@ static av_cold int initFilter(int16_t **outFilter, int32_t **filterPos, + filter[i * filterSize + j] = coeff; + xx++; } - } else if (!strcmp(argv[i], "-cpuflags")) { - unsigned flags = av_get_cpu_flags(); -- int ret = av_parse_cpu_caps(&flags, argv[i + 1]); -+ int ret = av_parse_cpu_caps(&flags, arg2); - if (ret < 0) { -- fprintf(stderr, "invalid cpu flags %s\n", argv[i + 1]); -+ fprintf(stderr, "invalid cpu flags %s\n", arg2); - return ret; - } - av_force_cpu_flags(flags); - } else if (!strcmp(argv[i], "-src")) { -- srcFormat = av_get_pix_fmt(argv[i + 1]); -+ srcFormat = av_get_pix_fmt(arg2); - if (srcFormat == AV_PIX_FMT_NONE) { -- fprintf(stderr, "invalid pixel format %s\n", argv[i + 1]); -+ fprintf(stderr, "invalid pixel format %s\n", arg2); - return -1; - } - } else if (!strcmp(argv[i], "-dst")) { -- dstFormat = av_get_pix_fmt(argv[i + 1]); -+ dstFormat = av_get_pix_fmt(arg2); - if (dstFormat == AV_PIX_FMT_NONE) { -- fprintf(stderr, "invalid pixel format %s\n", argv[i + 1]); -+ fprintf(stderr, "invalid pixel format %s\n", arg2); -+ return -1; -+ } -+ } else if (!strcmp(argv[i], "-w")) { -+ char * p = NULL; -+ W = strtoul(arg2, &p, 0); -+ if (!W || *p) { -+ fprintf(stderr, "bad width %s\n", arg2); -+ return -1; -+ } -+ } else if (!strcmp(argv[i], "-h")) { -+ char * p = NULL; -+ H = strtoul(arg2, &p, 0); -+ if (!H || *p) { -+ fprintf(stderr, "bad height '%s'\n", arg2); -+ return -1; -+ } -+ } else if (!strcmp(argv[i], "-t")) { -+ char * p = NULL; -+ time_rep = (int)strtol(arg2, &p, 0); -+ if (*p) { -+ fprintf(stderr, "bad time repetitions '%s'\n", arg2); - return -1; - } - } else { -@@ -414,15 +457,34 @@ bad_option: +- xDstInSrc += 2LL * xInc; ++ xDstInSrc += 2 * xInc; } } -- sws = sws_getContext(W / 12, H / 12, AV_PIX_FMT_RGB32, W, H, -+ S = (W + 15) & ~15; -+ rgb_data = av_mallocz(S * H * 4); -+ rgb_src[0] = rgb_data; -+ rgb_stride[0] = 4 * S; -+ data = av_mallocz(4 * S * H); -+ src[0] = data; -+ src[1] = data + S * H; -+ src[2] = data + S * H * 2; -+ src[3] = data + S * H * 3; -+ stride[0] = S; -+ stride[1] = S; -+ stride[2] = S; -+ stride[3] = S; -+ H2 = H < 96 ? 8 : H / 12; -+ W2 = W < 96 ? 
8 : W / 12; -+ -+ if (!rgb_data || !data) -+ return -1; -+ -+ sws = sws_getContext(W2, H2, AV_PIX_FMT_RGB32, W, H, - AV_PIX_FMT_YUVA420P, SWS_BILINEAR, NULL, NULL, NULL); - - av_lfg_init(&rand, 1); - - for (y = 0; y < H; y++) - for (x = 0; x < W * 4; x++) -- rgb_data[ x + y * 4 * W] = av_lfg_get(&rand); -- res = sws_scale(sws, rgb_src, rgb_stride, 0, H / 12, (uint8_t * const *) src, stride); -+ rgb_data[ x + y * 4 * S] = av_lfg_get(&rand); -+ res = sws_scale(sws, (const uint8_t * const *)rgb_src, rgb_stride, 0, H2, (uint8_t * const *) src, stride); - if (res < 0 || res != H) { - res = -1; - goto error; -@@ -431,10 +493,10 @@ bad_option: - av_free(rgb_data); - - if(fp) { -- res = fileTest(src, stride, W, H, fp, srcFormat, dstFormat); -+ res = fileTest((const uint8_t * const *)src, stride, W, H, fp, srcFormat, dstFormat); - fclose(fp); - } else { -- selfTest(src, stride, W, H, srcFormat, dstFormat); -+ selfTest((const uint8_t * const *)src, stride, W, H, srcFormat, dstFormat); - res = 0; +@@ -1763,7 +1763,7 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter, } - error: + + for (i = 0; i < 4; i++) +- if (!FF_ALLOCZ_TYPED_ARRAY(c->dither_error[i], c->dstW + 3)) ++ if (!FF_ALLOCZ_TYPED_ARRAY(c->dither_error[i], c->dstW + 2)) + goto nomem; + + c->needAlpha = (CONFIG_SWSCALE_ALPHA && isALPHA(c->srcFormat) && isALPHA(c->dstFormat)) ? 1 : 0; +@@ -1840,7 +1840,7 @@ av_cold int sws_init_context(SwsContext *c, SwsFilter *srcFilter, + /* unscaled special cases */ + if (unscaled && !usesHFilter && !usesVFilter && + (c->srcRange == c->dstRange || isAnyRGB(dstFormat) || +- isFloat(srcFormat) || isFloat(dstFormat) || isBayer(srcFormat))){ ++ isFloat(srcFormat) || isFloat(dstFormat))){ + ff_get_unscaled_swscale(c); + + if (c->swscale) { +diff --git a/libswscale/yuv2rgb.c b/libswscale/yuv2rgb.c +index eb095a77b1..6a3956e8e2 100644 +--- a/libswscale/yuv2rgb.c ++++ b/libswscale/yuv2rgb.c +@@ -826,7 +826,7 @@ av_cold int ff_yuv2rgb_c_init_tables(SwsContext *c, const int inv_table[4], + cbu = (cbu * contrast * saturation) >> 32; + cgu = (cgu * contrast * saturation) >> 32; + cgv = (cgv * contrast * saturation) >> 32; +- oy -= 256LL * brightness; ++ oy -= 256 * brightness; + + c->uOffset = 0x0400040004000400LL; + c->vOffset = 0x0400040004000400LL; diff --git a/pi-util/BUILD.txt b/pi-util/BUILD.txt new file mode 100644 -index 0000000000..2b62d660c0 +index 0000000000..b050971f63 --- /dev/null +++ b/pi-util/BUILD.txt -@@ -0,0 +1,67 @@ +@@ -0,0 +1,59 @@ +Building Pi FFmpeg +================== + @@ -75743,8 +75230,6 @@ index 0000000000..2b62d660c0 + paths being confused and therefore running the wrong code, Shared + is what is needed, in most cases, when building for use by other + programs. -+ --usr Set install dir to /usr (i.e. system default) rather than in -+ /install + +So for a static build +--------------------- @@ -75758,31 +75243,25 @@ index 0000000000..2b62d660c0 +For a shared build +------------------ + -+There are two choices here -+ +$ pi-util/conf_native.sh ++ ++You will normally want an install target if shared. Note that the script has ++set this up to be generated in out//install, you don't have to worry ++about overwriting your system libs. ++ +$ make -j8 -C out/ install + -+This sets the install prefix to /install and is probably what you -+want if you don't want to overwrite the system files. -+ +You can now set LD_LIBRARY_PATH appropriately and run ffmpeg from where it was -+built. You can copy the contents of /install to /usr and that mostly -+works. 
The only downside is that paths in pkgconfig end up being set to the -+install directory in your build directory which may be less than ideal when -+building other packages. ++built or install the image on the system - you have to be careful to get rid ++of all other ffmpeg libs or confusion may result. There is a little script ++that wipes all other versions - obviously use with care! + -+The alternative if you just want to replace the system libs is: -+ -+$ pi-util/conf_native.sh --usr -+$ make -j8 -C out/ +$ sudo pi-util/clean_usr_libs.sh -+$ sudo make -j8 -C out/ install + -+The clean_usr_libs.sh step wipes any existing libs & includes (for all -+architectures) from the system which helps avoid confusion when running other -+progs as you can be sure you're not running old code which is unfortunately -+easy to do otherwise. ++Then simply copying from the install to /usr works ++ ++$ sudo cp -r out//install/* /usr ++ + diff --git a/pi-util/NOTES.txt b/pi-util/NOTES.txt new file mode 100644 @@ -75949,27 +75428,11 @@ index 0000000000..92bc13a3df + diff --git a/pi-util/clean_usr_libs.sh b/pi-util/clean_usr_libs.sh new file mode 100755 -index 0000000000..01bd6a6a22 +index 0000000000..b3b2d5509d --- /dev/null +++ b/pi-util/clean_usr_libs.sh -@@ -0,0 +1,42 @@ +@@ -0,0 +1,26 @@ +set -e -+U=/usr/include/arm-linux-gnueabihf -+rm -rf $U/libavcodec -+rm -rf $U/libavdevice -+rm -rf $U/libavfilter -+rm -rf $U/libavformat -+rm -rf $U/libavutil -+rm -rf $U/libswresample -+rm -rf $U/libswscale -+U=/usr/include/aarch64-linux-gnu -+rm -rf $U/libavcodec -+rm -rf $U/libavdevice -+rm -rf $U/libavfilter -+rm -rf $U/libavformat -+rm -rf $U/libavutil -+rm -rf $U/libswresample -+rm -rf $U/libswscale +U=/usr/lib/arm-linux-gnueabihf +rm -f $U/libavcodec.* +rm -f $U/libavdevice.* @@ -76552,10 +76015,10 @@ index 0000000000..fc14f2a3c2 +1,WPP_F_ericsson_MAIN_2,WPP_F_ericsson_MAIN_2.bit,WPP_F_ericsson_MAIN_2_yuv.md5 diff --git a/pi-util/conf_native.sh b/pi-util/conf_native.sh new file mode 100755 -index 0000000000..5fb69ccee2 +index 0000000000..a9e053801c --- /dev/null +++ b/pi-util/conf_native.sh -@@ -0,0 +1,127 @@ +@@ -0,0 +1,107 @@ +echo "Configure for native build" + +FFSRC=`pwd` @@ -76567,7 +76030,6 @@ index 0000000000..5fb69ccee2 + +NOSHARED= +MMAL= -+USR_PREFIX= + +while [ "$1" != "" ] ; do + case $1 in @@ -76577,14 +76039,8 @@ index 0000000000..5fb69ccee2 + --mmal) + MMAL=1 + ;; -+ --usr) -+ USR_PREFIX=/usr -+ ;; + *) -+ echo "Usage $0: [--noshared] [--mmal] [--usr]" -+ echo " noshared Build static libs and executable - good for testing" -+ echo " mmal Build mmal decoders" -+ echo " usr Set install prefix to /usr [default=/install]" ++ echo "Usage $0: [--noshared] [--mmal]" + exit 1 + ;; + esac @@ -76598,28 +76054,18 @@ index 0000000000..5fb69ccee2 +RPI_DEFINES= +RPI_EXTRALIBS= + -+# uname -m gives kernel type which may not have the same -+# 32/64bitness as userspace :-( getconf shoudl provide the answer -+# but use uname to check we are on the right processor -+MC=`uname -m` -+LB=`getconf LONG_BIT` -+if [ "$MC" == "armv7l" ] || [ "$MC" == "aarch64" ]; then -+ if [ "$LB" == "32" ]; then -+ echo "M/C armv7" -+ A=arm-linux-gnueabihf -+ B=armv7 -+ MCOPTS="--arch=armv6t2 --cpu=cortex-a7" -+ RPI_DEFINES=-mfpu=neon-vfpv4 -+ elif [ "$LB" == "64" ]; then -+ echo "M/C aarch64" -+ A=aarch64-linux-gnu -+ B=arm64 -+ else -+ echo "Unknown LONG_BIT name: $LB" -+ exit 1 -+ fi ++if [ "$MC" == "arm64" ]; then ++ echo "M/C aarch64" ++ A=aarch64-linux-gnu ++ B=arm64 ++elif [ "$MC" == "armhf" ]; then ++ echo "M/C armv7" ++ 
A=arm-linux-gnueabihf ++ B=armv7 ++ MCOPTS="--arch=armv6t2 --cpu=cortex-a7" ++ RPI_DEFINES=-mfpu=neon-vfpv4 +else -+ echo "Unknown machine name: $MC" ++ echo Unexpected architecture $MC + exit 1 +fi + @@ -76647,9 +76093,7 @@ index 0000000000..5fb69ccee2 + OUT=$BUILDBASE/$B-$C-$V-shared-rel +fi + -+if [ ! $USR_PREFIX ]; then -+ USR_PREFIX=$OUT/install -+fi ++USR_PREFIX=$OUT/install +LIB_PREFIX=$USR_PREFIX/lib/$A +INC_PREFIX=$USR_PREFIX/include/$A + @@ -76679,7 +76123,6 @@ index 0000000000..5fb69ccee2 + --extra-libs="$RPI_EXTRALIBS"\ + --extra-version="rpi" + -+echo "Configured into $OUT" + +# gcc option for getting asm listing +# -Wa,-ahls @@ -78120,58 +77563,18 @@ index 07f1d8238e..aa5f45ec8f 100644 fate-checkasm-vf_blend \ fate-checkasm-vf_colorspace \ fate-checkasm-vf_eq \ -diff --git a/tests/ref/fate/webm-dash-manifest b/tests/ref/fate/webm-dash-manifest -index 3a557fc39f..f5fc9121da 100644 ---- a/tests/ref/fate/webm-dash-manifest -+++ b/tests/ref/fate/webm-dash-manifest -@@ -6,7 +6,7 @@ - type="static" - mediaPresentationDuration="PT32.501S" - minBufferTime="PT1S" -- profiles="urn:mpeg:dash:profile:webm-on-demand:2012"> -+ profiles="urn:webm:dash:profile:webm-on-demand:2012"> - - - -diff --git a/tests/ref/fate/webm-dash-manifest-representations b/tests/ref/fate/webm-dash-manifest-representations -index 41713bb367..8556ecebee 100644 ---- a/tests/ref/fate/webm-dash-manifest-representations -+++ b/tests/ref/fate/webm-dash-manifest-representations -@@ -6,7 +6,7 @@ - type="static" - mediaPresentationDuration="PT32.48S" - minBufferTime="PT1S" -- profiles="urn:mpeg:dash:profile:webm-on-demand:2012"> -+ profiles="urn:webm:dash:profile:webm-on-demand:2012"> - - - -diff --git a/tests/ref/fate/webm-dash-manifest-unaligned-audio-streams b/tests/ref/fate/webm-dash-manifest-unaligned-audio-streams -index b1bc7ecea1..6e9de211fb 100644 ---- a/tests/ref/fate/webm-dash-manifest-unaligned-audio-streams -+++ b/tests/ref/fate/webm-dash-manifest-unaligned-audio-streams -@@ -6,7 +6,7 @@ - type="static" - mediaPresentationDuration="PT32.501S" - minBufferTime="PT1S" -- profiles="urn:mpeg:dash:profile:webm-on-demand:2012"> -+ profiles="urn:webm:dash:profile:webm-on-demand:2012"> - - - -diff --git a/tests/ref/fate/webm-dash-manifest-unaligned-video-streams b/tests/ref/fate/webm-dash-manifest-unaligned-video-streams -index 690c2aabe3..ce205638b6 100644 ---- a/tests/ref/fate/webm-dash-manifest-unaligned-video-streams -+++ b/tests/ref/fate/webm-dash-manifest-unaligned-video-streams -@@ -6,7 +6,7 @@ - type="static" - mediaPresentationDuration="PT32.48S" - minBufferTime="PT1S" -- profiles="urn:mpeg:dash:profile:webm-on-demand:2012"> -+ profiles="urn:webm:dash:profile:webm-on-demand:2012"> - - - +diff --git a/tests/fate/subtitles.mak b/tests/fate/subtitles.mak +index 31d8b93521..ee65afe35b 100644 +--- a/tests/fate/subtitles.mak ++++ b/tests/fate/subtitles.mak +@@ -102,7 +102,6 @@ fate-sub-charenc: CMD = fmtstdout ass -sub_charenc cp1251 -i $(TARGET_SAMPLES)/s + + FATE_SUBTITLES-$(call DEMDEC, SCC, CCAPTION) += fate-sub-scc + fate-sub-scc: CMD = fmtstdout ass -ss 57 -i $(TARGET_SAMPLES)/sub/witch.scc +-fate-sub-scc: CMP = diff + + FATE_SUBTITLES-$(call ALLYES, MPEGTS_DEMUXER DVBSUB_DECODER DVBSUB_ENCODER) += fate-sub-dvb + fate-sub-dvb: CMD = framecrc -i $(TARGET_SAMPLES)/sub/dvbsubtest_filter.ts -map s:0 -c dvbsub diff --git a/tests/ref/seek/vsynth_lena-snow b/tests/ref/seek/vsynth_lena-snow index b2d2d22cda..33d6c27463 100644 --- a/tests/ref/seek/vsynth_lena-snow @@ -78236,6 +77639,17 @@ index b2d2d22cda..33d6c27463 
100644 -ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos: 16074 size: 3245 +ret: 0 st: 0 flags:1 dts: 0.480000 pts: 0.480000 pos: 16134 size: 3244 ret:-1 st:-1 flags:1 ts:-0.645825 +diff --git a/tests/ref/vsynth/vsynth1-jpeg2000-97 b/tests/ref/vsynth/vsynth1-jpeg2000-97 +index c979ab5c36..6ab5aa4237 100644 +--- a/tests/ref/vsynth/vsynth1-jpeg2000-97 ++++ b/tests/ref/vsynth/vsynth1-jpeg2000-97 +@@ -1,4 +1,4 @@ +-5e6d32b7205d31245b0d1f015d08b515 *tests/data/fate/vsynth1-jpeg2000-97.avi +-3643886 tests/data/fate/vsynth1-jpeg2000-97.avi ++e4d03b2e3c03e56c7f831b1e662c4031 *tests/data/fate/vsynth1-jpeg2000-97.avi ++3643928 tests/data/fate/vsynth1-jpeg2000-97.avi + a2262f1da2f49bc196b780a6b47ec4e8 *tests/data/fate/vsynth1-jpeg2000-97.out.rawvideo + stddev: 4.23 PSNR: 35.59 MAXDIFF: 53 bytes: 7603200/ 7603200 diff --git a/tests/ref/vsynth/vsynth1-snow b/tests/ref/vsynth/vsynth1-snow index b0e3a0bfd7..f20abd2ee4 100644 --- a/tests/ref/vsynth/vsynth1-snow @@ -78262,6 +77676,28 @@ index 72b082b2ce..39780ad8a2 100644 +138446 tests/data/fate/vsynth1-snow-hpel.avi +57c914cd150f8fc260b5989ce3e5884c *tests/data/fate/vsynth1-snow-hpel.out.rawvideo +stddev: 22.74 PSNR: 20.99 MAXDIFF: 172 bytes: 7603200/ 7603200 +diff --git a/tests/ref/vsynth/vsynth2-jpeg2000 b/tests/ref/vsynth/vsynth2-jpeg2000 +index b60307d5da..d0df0099ea 100644 +--- a/tests/ref/vsynth/vsynth2-jpeg2000 ++++ b/tests/ref/vsynth/vsynth2-jpeg2000 +@@ -1,4 +1,4 @@ +-bfe90391779a02319aab98b06dd18e6c *tests/data/fate/vsynth2-jpeg2000.avi +-1538724 tests/data/fate/vsynth2-jpeg2000.avi ++8c8a68ca748190c71b3ea43e5ab7f502 *tests/data/fate/vsynth2-jpeg2000.avi ++1538736 tests/data/fate/vsynth2-jpeg2000.avi + 64fadc87447268cf90503cb294db7f61 *tests/data/fate/vsynth2-jpeg2000.out.rawvideo + stddev: 4.91 PSNR: 34.29 MAXDIFF: 55 bytes: 7603200/ 7603200 +diff --git a/tests/ref/vsynth/vsynth2-jpeg2000-97 b/tests/ref/vsynth/vsynth2-jpeg2000-97 +index 591f8b6bb3..33c1fb2425 100644 +--- a/tests/ref/vsynth/vsynth2-jpeg2000-97 ++++ b/tests/ref/vsynth/vsynth2-jpeg2000-97 +@@ -1,4 +1,4 @@ +-aa5573136c54b1855d8d00efe2a149bd *tests/data/fate/vsynth2-jpeg2000-97.avi +-2464134 tests/data/fate/vsynth2-jpeg2000-97.avi ++c8f76055f59804ca72dbd66eb4db83a2 *tests/data/fate/vsynth2-jpeg2000-97.avi ++2464138 tests/data/fate/vsynth2-jpeg2000-97.avi + 1f63c8b065e847e4c63d57ce23442ea8 *tests/data/fate/vsynth2-jpeg2000-97.out.rawvideo + stddev: 3.21 PSNR: 37.99 MAXDIFF: 26 bytes: 7603200/ 7603200 diff --git a/tests/ref/vsynth/vsynth2-snow b/tests/ref/vsynth/vsynth2-snow index 355f89d5f4..e9607bb7d0 100644 --- a/tests/ref/vsynth/vsynth2-snow @@ -78288,6 +77724,50 @@ index ec3b5dfad2..66839fd6f6 100644 +79728 tests/data/fate/vsynth2-snow-hpel.avi +2cc64d8171175a1532fd7d3ed3011fbf *tests/data/fate/vsynth2-snow-hpel.out.rawvideo +stddev: 13.70 PSNR: 25.39 MAXDIFF: 162 bytes: 7603200/ 7603200 +diff --git a/tests/ref/vsynth/vsynth3-jpeg2000 b/tests/ref/vsynth/vsynth3-jpeg2000 +index 894dba27dc..ecc286b9a4 100644 +--- a/tests/ref/vsynth/vsynth3-jpeg2000 ++++ b/tests/ref/vsynth/vsynth3-jpeg2000 +@@ -1,4 +1,4 @@ +-1d039969504abdc143b410f99b5f9171 *tests/data/fate/vsynth3-jpeg2000.avi +-67354 tests/data/fate/vsynth3-jpeg2000.avi ++776bf3234cbf25002f129b89baab42ea *tests/data/fate/vsynth3-jpeg2000.avi ++67400 tests/data/fate/vsynth3-jpeg2000.avi + 098f5980667e1fcd50452b1dc1a74f61 *tests/data/fate/vsynth3-jpeg2000.out.rawvideo + stddev: 5.47 PSNR: 33.36 MAXDIFF: 48 bytes: 86700/ 86700 +diff --git a/tests/ref/vsynth/vsynth3-jpeg2000-97 b/tests/ref/vsynth/vsynth3-jpeg2000-97 +index 
5d9d083791..df10f43270 100644 +--- a/tests/ref/vsynth/vsynth3-jpeg2000-97 ++++ b/tests/ref/vsynth/vsynth3-jpeg2000-97 +@@ -1,4 +1,4 @@ +-522e12684aca4262a9d613cb2db7006c *tests/data/fate/vsynth3-jpeg2000-97.avi +-85526 tests/data/fate/vsynth3-jpeg2000-97.avi ++cd023db503f03ef72dd83e4617a90c7b *tests/data/fate/vsynth3-jpeg2000-97.avi ++85606 tests/data/fate/vsynth3-jpeg2000-97.avi + 8def36ad1413ab3a5c2af2e1af4603f9 *tests/data/fate/vsynth3-jpeg2000-97.out.rawvideo + stddev: 4.51 PSNR: 35.04 MAXDIFF: 47 bytes: 86700/ 86700 +diff --git a/tests/ref/vsynth/vsynth_lena-jpeg2000 b/tests/ref/vsynth/vsynth_lena-jpeg2000 +index e2cbc899d3..88629add21 100644 +--- a/tests/ref/vsynth/vsynth_lena-jpeg2000 ++++ b/tests/ref/vsynth/vsynth_lena-jpeg2000 +@@ -1,4 +1,4 @@ +-51f061731d7fb987ff4e71789785225e *tests/data/fate/vsynth_lena-jpeg2000.avi +-1188882 tests/data/fate/vsynth_lena-jpeg2000.avi ++b8aaa45236f77a2a626791d462fd8ac1 *tests/data/fate/vsynth_lena-jpeg2000.avi ++1188886 tests/data/fate/vsynth_lena-jpeg2000.avi + 39a2c5b61cd0cf2821c6fb4cceba2fa8 *tests/data/fate/vsynth_lena-jpeg2000.out.rawvideo + stddev: 4.30 PSNR: 35.45 MAXDIFF: 45 bytes: 7603200/ 7603200 +diff --git a/tests/ref/vsynth/vsynth_lena-jpeg2000-97 b/tests/ref/vsynth/vsynth_lena-jpeg2000-97 +index 0539300185..b6f5f75f77 100644 +--- a/tests/ref/vsynth/vsynth_lena-jpeg2000-97 ++++ b/tests/ref/vsynth/vsynth_lena-jpeg2000-97 +@@ -1,4 +1,4 @@ +-80fe872c8afaad914da6ef037957d93b *tests/data/fate/vsynth_lena-jpeg2000-97.avi +-1937216 tests/data/fate/vsynth_lena-jpeg2000-97.avi ++b2d9525433c6300674f504922d762437 *tests/data/fate/vsynth_lena-jpeg2000-97.avi ++1937232 tests/data/fate/vsynth_lena-jpeg2000-97.avi + 1b97333a8dc115a5ba609b0070d89d4d *tests/data/fate/vsynth_lena-jpeg2000-97.out.rawvideo + stddev: 2.82 PSNR: 39.10 MAXDIFF: 24 bytes: 7603200/ 7603200 diff --git a/tests/ref/vsynth/vsynth_lena-snow b/tests/ref/vsynth/vsynth_lena-snow index 582c294531..ec29a78483 100644 --- a/tests/ref/vsynth/vsynth_lena-snow @@ -78314,15 +77794,48 @@ index 67effebc8a..2d6edd8a79 100644 +61764 tests/data/fate/vsynth_lena-snow-hpel.avi +244b0266127fa354d8485234b2c388e4 *tests/data/fate/vsynth_lena-snow-hpel.out.rawvideo +stddev: 10.45 PSNR: 27.74 MAXDIFF: 119 bytes: 7603200/ 7603200 +diff --git a/tools/coverity.c b/tools/coverity.c +index 541e108238..19a132a976 100644 +--- a/tools/coverity.c ++++ b/tools/coverity.c +@@ -31,17 +31,6 @@ + + #define NULL (void *)0 + +-typedef long long int64_t; +- +-enum AVRounding { +- AV_ROUND_ZERO = 0, +- AV_ROUND_INF = 1, +- AV_ROUND_DOWN = 2, +- AV_ROUND_UP = 3, +- AV_ROUND_NEAR_INF = 5, +- AV_ROUND_PASS_MINMAX = 8192, +-}; +- + // Based on https://scan.coverity.com/models + void *av_malloc(size_t size) { + int has_memory; +@@ -88,10 +77,3 @@ void *av_free(void *ptr) { + __coverity_mark_as_afm_freed__(ptr, "av_free"); + } + +- +-int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd) { +- __coverity_negative_sink__(b); +- __coverity_negative_sink__(c); +- +- return (double)a * (double)b / (double)c; +-} diff --git a/tools/target_dec_fuzzer.c b/tools/target_dec_fuzzer.c -index 825ca2d7eb..9e15216e59 100644 +index 43449ffb0e..825ca2d7eb 100644 --- a/tools/target_dec_fuzzer.c +++ b/tools/target_dec_fuzzer.c -@@ -172,7 +172,6 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { - case AV_CODEC_ID_INTERPLAY_ACM: maxsamples /= 16384; break; - case AV_CODEC_ID_LAGARITH: maxpixels /= 1024; break; - case AV_CODEC_ID_LSCR: maxpixels /= 16; break; -- case AV_CODEC_ID_MMVIDEO: maxpixels /= 256; 
break; - case AV_CODEC_ID_MOTIONPIXELS:maxpixels /= 256; break; - case AV_CODEC_ID_MP4ALS: maxsamples /= 65536; break; - case AV_CODEC_ID_MSA1: maxpixels /= 16384; break; +@@ -184,7 +184,6 @@ int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { + case AV_CODEC_ID_APNG: maxpixels /= 128; break; + case AV_CODEC_ID_QTRLE: maxpixels /= 16; break; + case AV_CODEC_ID_RASC: maxpixels /= 16; break; +- case AV_CODEC_ID_RTV1: maxpixels /= 16; break; + case AV_CODEC_ID_SANM: maxpixels /= 16; break; + case AV_CODEC_ID_SCPR: maxpixels /= 32; break; + case AV_CODEC_ID_SCREENPRESSO:maxpixels /= 64; break; diff --git a/alarm/ffmpeg-rpi/PKGBUILD b/alarm/ffmpeg-rpi/PKGBUILD index 7b1f4ddf8..111502dde 100644 --- a/alarm/ffmpeg-rpi/PKGBUILD +++ b/alarm/ffmpeg-rpi/PKGBUILD @@ -6,8 +6,8 @@ pkgbase=ffmpeg-rpi pkgname=($pkgbase $pkgbase-bin) -pkgver=4.4.4 -pkgrel=6 +pkgver=4.4.5 +pkgrel=1 arch=(aarch64) url=https://ffmpeg.org/ license=(GPL3) @@ -77,13 +77,13 @@ depends=( options=(debug) source=(https://ffmpeg.org/releases/${pkgname/-rpi}-$pkgver.tar.xz{,.asc} 0001-vmaf-model-path.patch - 0002-ffmpeg-4.4.4n-rpi.patch + 0002-ffmpeg-4.4.5n-rpi.patch 0003-fix_flags.patch ) -sha256sums=('e80b380d595c809060f66f96a5d849511ef4a76a26b76eacf5778b94c3570309' +sha256sums=('f9514e0d3515aee5a271283df71636e1d1ff7274b15853bcd84e144be416ab07' 'SKIP' '2e8d885de789b461ddf63c10646cdb16ad5519b671efd1624bf5a8e7da43dbf3' - 'c3db95417fbfdd9e7a96d63cb2a91ad1eee17ae233c0ef1cf1588f8c0eff90fa' + 'db745dfb5a4b857bd080ffdf0597929a9081112c61e61a6677b0b85cd314f425' '42f57e7a55f250811515571c870372d6ed0ed504f823b341d26f383c082ce0a0') validpgpkeys=('FCF986EA15E6E293A5644F10B4322F04D67658D8')
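
Packaging note (not part of the patch): a rough sketch of how a bump like this is
usually reproduced and verified locally, assuming the stock Arch/ALARM tooling
(makepkg, plus updpkgsums from pacman-contrib); the wildcard package names below
are illustrative rather than the exact artifact names makepkg will emit:

$ cd alarm/ffmpeg-rpi
# import the FFmpeg release signing key listed in validpgpkeys above
$ gpg --recv-keys FCF986EA15E6E293A5644F10B4322F04D67658D8
# regenerate the sha256sums array after swapping in the renamed 4.4.5 patch
$ updpkgsums
# build both split packages (ffmpeg-rpi, ffmpeg-rpi-bin), pulling in build deps
$ makepkg -s
# install the resulting packages for testing
$ sudo pacman -U ffmpeg-rpi-*.pkg.tar.* ffmpeg-rpi-bin-*.pkg.tar.*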