diff --git a/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPacketWrapper.cpp b/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPacketWrapper.cpp
index 658fe92ca..13a8396f0 100644
--- a/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPacketWrapper.cpp
+++ b/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPacketWrapper.cpp
@@ -31,104 +31,10 @@
  */
 
 #include "AVPacketWrapper.h"
-#include
-#include
 
 namespace LibFFmpeg
 {
 
-namespace
-{
-
-bool checkForRawNALFormat(QByteArray &data, bool threeByteStartCode)
-{
-  if (threeByteStartCode && data.length() > 3 && data.at(0) == (char)0 && data.at(1) == (char)0 &&
-      data.at(2) == (char)1)
-    return true;
-  if (!threeByteStartCode && data.length() > 4 && data.at(0) == (char)0 && data.at(1) == (char)0 &&
-      data.at(2) == (char)0 && data.at(3) == (char)1)
-    return true;
-  return false;
-}
-
-bool checkForMp4Format(QByteArray &data)
-{
-  // Check the ISO mp4 format: Parse the whole data and check if the size bytes per Unit are
-  // correct.
-  uint64_t posInData = 0;
-  while (posInData + 4 <= uint64_t(data.length()))
-  {
-    auto firstBytes = data.mid(posInData, 4);
-
-    unsigned size = (unsigned char)firstBytes.at(3);
-    size += (unsigned char)firstBytes.at(2) << 8;
-    size += (unsigned char)firstBytes.at(1) << 16;
-    size += (unsigned char)firstBytes.at(0) << 24;
-    posInData += 4;
-
-    if (size > 1'000'000'000)
-      // A Nal with more then 1GB? This is probably an error.
-      return false;
-    if (posInData + size > uint64_t(data.length()))
-      // Not enough data in the input array to read NAL unit.
-      return false;
-    posInData += size;
-  }
-  return true;
-}
-
-bool checkForObuFormat(QByteArray &data)
-{
-  // TODO: We already have an implementation of this in the parser
-  // That should also be used here so we only have one place where we parse OBUs.
-  try
-  {
-    size_t posInData = 0;
-    while (posInData + 2 <= size_t(data.length()))
-    {
-      parser::reader::SubByteReaderLogging reader(
-          parser::reader::SubByteReaderLogging::convertToByteVector(data), nullptr, "", posInData);
-
-      QString bitsRead;
-      auto forbiddenBit = reader.readFlag("obu_forbidden_bit");
-      if (forbiddenBit)
-        return false;
-      auto obu_type = reader.readBits("obu_type", 4);
-      if (obu_type == 0 || (obu_type >= 9 && obu_type <= 14))
-        // RESERVED obu types should not occur (highly unlikely)
-        return false;
-      auto obu_extension_flag = reader.readFlag("obu_extension_flag");
-      auto obu_has_size_field = reader.readFlag("obu_has_size_field");
-      reader.readFlag("obu_reserved_1bit", parser::reader::Options().withCheckEqualTo(1));
-
-      if (obu_extension_flag)
-      {
-        reader.readBits("temporal_id", 3);
-        reader.readBits("spatial_id", 2);
-        reader.readBits(
-            "extension_header_reserved_3bits", 3, parser::reader::Options().withCheckEqualTo(0));
-      }
-      size_t obu_size;
-      if (obu_has_size_field)
-      {
-        obu_size = reader.readLEB128("obu_size");
-      }
-      else
-      {
-        obu_size = (size_t(data.size()) - posInData) - 1 - (obu_extension_flag ? 1 : 0);
-      }
-      posInData += obu_size + reader.nrBytesRead();
-    }
-  }
-  catch (...)
-  {
-    return false;
-  }
-  return true;
-}
-
-} // namespace
-
 AVPacketWrapper::AVPacketWrapper(AVPacket *packet, const LibraryVersions &libraryVersions)
 {
   assert(packet != nullptr);
@@ -317,46 +223,6 @@ int AVPacketWrapper::getDataSize()
   return this->size;
 }
 
-PacketType AVPacketWrapper::getPacketType() const
-{
-  return this->packetType;
-}
-
-void AVPacketWrapper::setPacketType(PacketType packetType)
-{
-  this->packetType = packetType;
-}
-
-PacketDataFormat AVPacketWrapper::guessDataFormatFromData()
-{
-  if (this->packetFormat != PacketDataFormat::Unknown)
-    return this->packetFormat;
-
-  auto avpacketData = QByteArray::fromRawData((const char *)(getData()), getDataSize());
-  if (avpacketData.length() < 4)
-  {
-    this->packetFormat = PacketDataFormat::Unknown;
-    return this->packetFormat;
-  }
-
-  // AVPacket data can be in one of two formats:
-  // 1: The raw annexB format with start codes (0x00000001 or 0x000001)
-  // 2: ISO/IEC 14496-15 mp4 format: The first 4 bytes determine the size of the NAL unit followed
-  // by the payload We will try to guess the format of the data from the data in this AVPacket. This
-  // should always work unless a format is used which we did not encounter so far (which is not
-  // listed above) Also I think this should be identical for all packets in a bitstream.
-  if (checkForRawNALFormat(avpacketData, false))
-    this->packetFormat = PacketDataFormat::RawNAL;
-  else if (checkForMp4Format(avpacketData))
-    this->packetFormat = PacketDataFormat::MP4;
-  else if (checkForObuFormat(avpacketData))
-    this->packetFormat = PacketDataFormat::OBU;
-  else if (checkForRawNALFormat(avpacketData, true))
-    this->packetFormat = PacketDataFormat::RawNAL;
-
-  return this->packetFormat;
-}
-
 void AVPacketWrapper::update()
 {
   if (this->pkt == nullptr)
diff --git a/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPacketWrapper.h b/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPacketWrapper.h
index 15f1b4bdb..904dbe7c1 100644
--- a/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPacketWrapper.h
+++ b/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPacketWrapper.h
@@ -37,27 +37,6 @@
 namespace LibFFmpeg
 {
 
-// AVPacket data can be in one of two formats:
-// 1: The raw annexB format with start codes (0x00000001 or 0x000001)
-// 2: ISO/IEC 14496-15 mp4 format: The first 4 bytes determine the size of the NAL unit followed by
-// the payload
-enum class PacketDataFormat
-{
-  Unknown,
-  RawNAL,
-  MP4,
-  OBU
-};
-
-enum class PacketType
-{
-  VIDEO,
-  AUDIO,
-  SUBTITLE_DVB,
-  SUBTITLE_608,
-  OTHER
-};
-
 // AVPacket is part of avcodec. The definition is different for different major versions of avcodec.
 // These are the version independent functions to retrive data from AVPacket.
 // The size of this struct is part of the public API and must be correct
@@ -139,13 +118,6 @@ class AVPacketWrapper
   uint8_t * getData();
   int getDataSize();
 
-  // This info is set externally (in FileSourceFFmpegFile) based on the stream info
-  PacketType getPacketType() const;
-  void setPacketType(PacketType packetType);
-
-  // Guess the format. The actual guessing is only performed if the packetFormat is not set yet.
-  PacketDataFormat guessDataFormatFromData();
-
   explicit operator bool() const { return this->pkt != nullptr; };
 
 private:
@@ -164,11 +136,8 @@ class AVPacketWrapper
   int64_t duration{};
   int64_t pos{};
 
-  PacketType packetType{};
-
-  AVPacket * pkt{};
-  LibraryVersions libraryVersions{};
-  PacketDataFormat packetFormat{PacketDataFormat::Unknown};
+  AVPacket * pkt{};
+  LibraryVersions libraryVersions{};
 };
 
-} // namespace LibFFmpeg
\ No newline at end of file
+} // namespace LibFFmpeg
diff --git a/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPixFmtDescriptorWrapper.cpp b/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPixFmtDescriptorWrapper.cpp
index 89afdb2d5..ed6de2be3 100644
--- a/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPixFmtDescriptorWrapper.cpp
+++ b/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPixFmtDescriptorWrapper.cpp
@@ -32,10 +32,6 @@
 
 #include "AVPixFmtDescriptorWrapper.h"
 
-using Subsampling = video::yuv::Subsampling;
-using PlaneOrder = video::yuv::PlaneOrder;
-using PixelFormatYUV = video::yuv::PixelFormatYUV;
-
 using namespace std::rel_ops;
 
 namespace LibFFmpeg
@@ -44,16 +40,16 @@ namespace LibFFmpeg
 namespace
 {
 
-typedef struct AVComponentDescriptor_54
+struct AVComponentDescriptor_54
 {
   uint16_t plane : 2;
   uint16_t step_minus1 : 3;
   uint16_t offset_plus1 : 3;
   uint16_t shift : 3;
   uint16_t depth_minus1 : 4;
-} AVComponentDescriptor_54;
+};
 
-typedef struct AVPixFmtDescriptor_54
+struct AVPixFmtDescriptor_54
 {
   const char * name;
   uint8_t nb_components;
@@ -62,9 +58,9 @@ typedef struct AVPixFmtDescriptor_54
   uint8_t flags;
   AVComponentDescriptor_54 comp[4];
   const char * alias;
-} AVPixFmtDescriptor_54;
+};
 
-typedef struct AVComponentDescriptor_55_56
+struct AVComponentDescriptor_55_56
 {
   int plane;
   int step;
@@ -76,9 +72,9 @@ typedef struct AVComponentDescriptor_55_56
   int step_minus1;
   int depth_minus1;
   int offset_plus1;
-} AVComponentDescriptor_55_56;
+};
 
-typedef struct AVPixFmtDescriptor_55
+struct AVPixFmtDescriptor_55
 {
   const char * name;
   uint8_t nb_components;
@@ -87,9 +83,9 @@ typedef struct AVPixFmtDescriptor_55
   uint64_t flags;
   AVComponentDescriptor_55_56 comp[4];
   const char * alias;
-} AVPixFmtDescriptor_55;
+};
 
-typedef struct AVPixFmtDescriptor_56
+struct AVPixFmtDescriptor_56
 {
   const char * name;
   uint8_t nb_components;
@@ -98,18 +94,18 @@ typedef struct AVPixFmtDescriptor_56
   uint64_t flags;
   AVComponentDescriptor_55_56 comp[4];
   const char * alias;
-} AVPixFmtDescriptor_56;
+};
 
-typedef struct AVComponentDescriptor_57
+struct AVComponentDescriptor_57
 {
   int plane;
   int step;
   int offset;
   int shift;
   int depth;
-} AVComponentDescriptor_57;
+};
 
-typedef struct AVPixFmtDescriptor_57_58
+struct AVPixFmtDescriptor_57_58
 {
   const char * name;
   uint8_t nb_components;
@@ -118,9 +114,9 @@ typedef struct AVPixFmtDescriptor_57_58
   uint64_t flags;
   AVComponentDescriptor_57 comp[4];
   const char * alias;
-} AVPixFmtDescriptor_57_58;
+};
 
-AVPixFmtDescriptorWrapper::Flags parseFlags(uint8_t flagsValue)
+AVPixFmtDescriptorWrapper::Flags parseFlags(const uint8_t flagsValue)
 {
   AVPixFmtDescriptorWrapper::Flags flags;
   flags.bigEndian = flagsValue & (1 << 0);
@@ -131,25 +127,23 @@ AVPixFmtDescriptorWrapper::Flags parseFlags(uint8_t flagsValue)
   flags.rgb = flagsValue & (1 << 5);
   flags.pseudoPallette = flagsValue & (1 << 6);
   flags.hasAlphaPlane = flagsValue & (1 << 7);
-  flags.bayerPattern = flagsValue & (1 << 8);
-  flags.floatValues = flagsValue & (1 << 9);
   return flags;
 }
 
-bool flagsSupported(const AVPixFmtDescriptorWrapper::Flags &flags)
+AVPixFmtDescriptorWrapper::Flags parseFlags(const uint64_t flagsValue)
 {
-  // We don't support any of these
-  if (flags.pallette)
-    return false;
-  if (flags.hwAccelerated)
-    return false;
-  if (flags.pseudoPallette)
-    return false;
-  if (flags.bayerPattern)
-    return false;
-  if (flags.floatValues)
-    return false;
-  return true;
+  AVPixFmtDescriptorWrapper::Flags flags;
+  flags.bigEndian = flagsValue & (1 << 0);
+  flags.pallette = flagsValue & (1 << 1);
+  flags.bitwisePacked = flagsValue & (1 << 2);
+  flags.hwAccelerated = flagsValue & (1 << 3);
+  flags.planar = flagsValue & (1 << 4);
+  flags.rgb = flagsValue & (1 << 5);
+  flags.pseudoPallette = flagsValue & (1 << 6);
+  flags.hasAlphaPlane = flagsValue & (1 << 7);
+  flags.bayerPattern = flagsValue & (1 << 8);
+  flags.floatValues = flagsValue & (1 << 9);
+  return flags;
 }
 
 } // namespace
@@ -240,134 +234,6 @@ AVPixFmtDescriptorWrapper::AVPixFmtDescriptorWrapper(AVPixFmtDescriptor * desc
   }
 }
 
-video::RawFormat AVPixFmtDescriptorWrapper::getRawFormat() const
-{
-  return this->flags.rgb ? video::RawFormat::RGB : video::RawFormat::YUV;
-}
-
-PixelFormatYUV AVPixFmtDescriptorWrapper::getPixelFormatYUV() const
-{
-  if (this->getRawFormat() == video::RawFormat::RGB || !flagsSupported(this->flags))
-    return {};
-
-  Subsampling subsampling;
-  if (this->nb_components == 1)
-    subsampling = Subsampling::YUV_400;
-  else if (this->log2_chroma_w == 0 && this->log2_chroma_h == 0)
-    subsampling = Subsampling::YUV_444;
-  else if (this->log2_chroma_w == 1 && this->log2_chroma_h == 0)
-    subsampling = Subsampling::YUV_422;
-  else if (this->log2_chroma_w == 1 && this->log2_chroma_h == 1)
-    subsampling = Subsampling::YUV_420;
-  else if (this->log2_chroma_w == 0 && this->log2_chroma_h == 1)
-    subsampling = Subsampling::YUV_440;
-  else if (this->log2_chroma_w == 2 && this->log2_chroma_h == 2)
-    subsampling = Subsampling::YUV_410;
-  else if (this->log2_chroma_w == 0 && this->log2_chroma_h == 2)
-    subsampling = Subsampling::YUV_411;
-  else
-    return {};
-
-  PlaneOrder planeOrder;
-  if (this->nb_components == 1)
-    planeOrder = PlaneOrder::YUV;
-  else if (this->nb_components == 3 && !this->flags.hasAlphaPlane)
-    planeOrder = PlaneOrder::YUV;
-  else if (this->nb_components == 4 && this->flags.hasAlphaPlane)
-    planeOrder = PlaneOrder::YUVA;
-  else
-    return {};
-
-  int bitsPerSample = comp[0].depth;
-  for (int i = 1; i < this->nb_components; i++)
-    if (comp[i].depth != bitsPerSample)
-      // Varying bit depths for components is not supported
-      return {};
-
-  if (this->flags.bitwisePacked || !this->flags.planar)
-    // Maybe this could be supported but I don't think that any decoder actually uses this.
-    // If you encounter a format that does not work because of this check please let us know.
-    return {};
-
-  return PixelFormatYUV(subsampling, bitsPerSample, planeOrder, this->flags.bigEndian);
-}
-
-video::rgb::PixelFormatRGB AVPixFmtDescriptorWrapper::getRGBPixelFormat() const
-{
-  if (this->getRawFormat() == video::RawFormat::YUV || !flagsSupported(this->flags))
-    return {};
-
-  auto bitsPerSample = comp[0].depth;
-  for (int i = 1; i < nb_components; i++)
-    if (comp[i].depth != bitsPerSample)
-      // Varying bit depths for components is not supported
-      return {};
-
-  if (this->flags.bitwisePacked)
-    // Maybe this could be supported but I don't think that any decoder actually uses this.
-    // If you encounter a format that does not work because of this check please let us know.
-    return {};
-
-  // The only possible order of planes seems to be RGB(A)
-  auto dataLayout = this->flags.planar ? video::DataLayout::Planar : video::DataLayout::Packed;
-  auto alphaMode =
-      this->flags.hasAlphaPlane ? video::rgb::AlphaMode::Last : video::rgb::AlphaMode::None;
-  auto endianness = this->flags.bigEndian ? video::Endianness::Big : video::Endianness::Little;
-
-  return video::rgb::PixelFormatRGB(
-      bitsPerSample, dataLayout, video::rgb::ChannelOrder::RGB, alphaMode, endianness);
-}
-
-bool AVPixFmtDescriptorWrapper::setValuesFromPixelFormatYUV(PixelFormatYUV fmt)
-{
-  const auto planeOrder = fmt.getPlaneOrder();
-  if (planeOrder == PlaneOrder::YVU || planeOrder == PlaneOrder::YVUA)
-    return false;
-
-  const auto subsampling = fmt.getSubsampling();
-  switch (subsampling)
-  {
-  case Subsampling::YUV_422:
-    this->log2_chroma_w = 1;
-    this->log2_chroma_h = 0;
-    break;
-  case Subsampling::YUV_420:
-    this->log2_chroma_w = 1;
-    this->log2_chroma_h = 1;
-    break;
-  case Subsampling::YUV_440:
-    this->log2_chroma_w = 0;
-    this->log2_chroma_h = 1;
-    break;
-  case Subsampling::YUV_410:
-    this->log2_chroma_w = 2;
-    this->log2_chroma_h = 2;
-    break;
-  case Subsampling::YUV_411:
-    this->log2_chroma_w = 0;
-    this->log2_chroma_h = 2;
-    break;
-  default:
-    break;
-  }
-
-  this->nb_components = (subsampling == Subsampling::YUV_400 ? 1 : 3);
-
-  this->flags.bigEndian = fmt.isBigEndian();
-  this->flags.planar = fmt.isPlanar();
-  this->flags.hasAlphaPlane = (planeOrder == PlaneOrder::YUVA);
-
-  for (int i = 0; i < this->nb_components; i++)
-  {
-    this->comp[i].plane = i;
-    this->comp[i].step = (fmt.getBitsPerSample() > 8) ? 2 : 1;
-    this->comp[i].offset = 0;
-    this->comp[i].shift = 0;
-    this->comp[i].depth = fmt.getBitsPerSample();
-  }
-  return true;
-}
-
 bool AVPixFmtDescriptorWrapper::Flags::operator==(
     const AVPixFmtDescriptorWrapper::Flags &other) const
 {
diff --git a/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPixFmtDescriptorWrapper.h b/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPixFmtDescriptorWrapper.h
index 6acbd3037..1846a1cf1 100644
--- a/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPixFmtDescriptorWrapper.h
+++ b/YUViewLib/externalLibs/LibFFmpeg++/wrappers/AVPixFmtDescriptorWrapper.h
@@ -34,9 +34,6 @@
 
 #include
 
-#include
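
Note for reviewers: the packet-format detection removed above boils down to a small heuristic, namely "does the payload start with an Annex B start code, or does it parse as a sequence of 4-byte big-endian length-prefixed NAL units (ISO/IEC 14496-15)?". The following is a minimal, self-contained sketch of that heuristic for reference while the logic is relocated; it uses std::vector<uint8_t> instead of QByteArray, omits the OBU check, and the names GuessedFormat / guessPacketFormat are illustrative, not part of the YUView API.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative stand-in for LibFFmpeg::PacketDataFormat (OBU omitted for brevity).
enum class GuessedFormat { Unknown, RawNAL, MP4 };

// ISO/IEC 14496-15 layout: every NAL unit is preceded by a 4-byte big-endian size field.
// Walk the packet as size-prefixed units and reject implausible sizes or overruns.
static bool looksLikeLengthPrefixed(const std::vector<uint8_t> &data)
{
  size_t pos = 0;
  while (pos + 4 <= data.size())
  {
    const uint32_t size = (uint32_t(data[pos]) << 24) | (uint32_t(data[pos + 1]) << 16) |
                          (uint32_t(data[pos + 2]) << 8) | uint32_t(data[pos + 3]);
    pos += 4;
    if (size > 1'000'000'000 || pos + size > data.size())
      return false; // Implausibly large unit or not enough data left in the packet.
    pos += size;
  }
  return true;
}

GuessedFormat guessPacketFormat(const std::vector<uint8_t> &data)
{
  if (data.size() < 4)
    return GuessedFormat::Unknown;
  // Same order as the removed checks: 4-byte start code first, then the length-prefixed
  // layout, and the 3-byte start code only as a last resort.
  if (data[0] == 0 && data[1] == 0 && data[2] == 0 && data[3] == 1)
    return GuessedFormat::RawNAL;
  if (looksLikeLengthPrefixed(data))
    return GuessedFormat::MP4;
  if (data[0] == 0 && data[1] == 0 && data[2] == 1)
    return GuessedFormat::RawNAL;
  return GuessedFormat::Unknown;
}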
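
The removed getPixelFormatYUV() likewise reduces to a small mapping from the descriptor's nb_components and log2_chroma_w / log2_chroma_h fields to a subsampling mode (for example, the common yuv420p descriptors carry log2_chroma_w == 1 and log2_chroma_h == 1). Below is a sketch of that mapping under the same caveat: ChromaSubsampling and subsamplingFromChromaShift are illustrative names, not YUView's video::yuv types.

#include <optional>

// Illustrative stand-in for video::yuv::Subsampling.
enum class ChromaSubsampling { YUV_400, YUV_410, YUV_411, YUV_420, YUV_422, YUV_440, YUV_444 };

// Map FFmpeg's chroma shift values to a subsampling mode, mirroring the removed
// getPixelFormatYUV(): one component means luma only (4:0:0); any combination of
// shifts that is not listed is rejected.
std::optional<ChromaSubsampling>
subsamplingFromChromaShift(int nbComponents, int log2ChromaW, int log2ChromaH)
{
  if (nbComponents == 1)
    return ChromaSubsampling::YUV_400;
  if (log2ChromaW == 0 && log2ChromaH == 0)
    return ChromaSubsampling::YUV_444;
  if (log2ChromaW == 1 && log2ChromaH == 0)
    return ChromaSubsampling::YUV_422;
  if (log2ChromaW == 1 && log2ChromaH == 1)
    return ChromaSubsampling::YUV_420;
  if (log2ChromaW == 0 && log2ChromaH == 1)
    return ChromaSubsampling::YUV_440;
  if (log2ChromaW == 2 && log2ChromaH == 2)
    return ChromaSubsampling::YUV_410;
  if (log2ChromaW == 0 && log2ChromaH == 2)
    return ChromaSubsampling::YUV_411;
  return std::nullopt;
}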