From 98fe307cbfe653f1551358be1f2055dd01e77e07 Mon Sep 17 00:00:00 2001 From: John Preston Date: Tue, 5 Jul 2016 20:43:30 +0300 Subject: [PATCH] Packet queue instead of single packet is used in ffmpeg clip reader. --- .../SourceFiles/media/media_clip_ffmpeg.cpp | 175 +++++++++++------- .../SourceFiles/media/media_clip_ffmpeg.h | 22 ++- .../media/media_clip_implementation.h | 7 +- .../SourceFiles/media/media_clip_qtgif.cpp | 4 +- Telegram/SourceFiles/media/media_clip_qtgif.h | 2 +- .../SourceFiles/media/media_clip_reader.cpp | 36 ++-- .../SourceFiles/media/media_clip_reader.h | 4 + Telegram/SourceFiles/mediaview.cpp | 3 +- 8 files changed, 163 insertions(+), 90 deletions(-) diff --git a/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp b/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp index d10568f704..f509310d8e 100644 --- a/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp +++ b/Telegram/SourceFiles/media/media_clip_ffmpeg.cpp @@ -27,9 +27,9 @@ namespace internal { FFMpegReaderImplementation::FFMpegReaderImplementation(FileLocation *location, QByteArray *data) : ReaderImplementation(location, data) { _frame = av_frame_alloc(); - av_init_packet(&_avpkt); - _avpkt.data = NULL; - _avpkt.size = 0; + av_init_packet(&_packetNull); + _packetNull.data = nullptr; + _packetNull.size = 0; } bool FFMpegReaderImplementation::readNextFrame() { @@ -38,57 +38,50 @@ bool FFMpegReaderImplementation::readNextFrame() { _frameRead = false; } - int res; while (true) { - if (_avpkt.size > 0) { // previous packet not finished - res = 0; - } else if ((res = av_read_frame(_fmtContext, &_avpkt)) < 0) { - if (res != AVERROR_EOF || !_hadFrame) { - char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; - LOG(("Gif Error: Unable to av_read_frame() %1, error %2, %3").arg(logData()).arg(res).arg(av_make_error_string(err, sizeof(err), res))); + while (_packetQueue.isEmpty()) { + auto packetResult = readPacket(); + if (packetResult == PacketResult::Error) { + return false; + } else if (packetResult == PacketResult::EndOfFile) { + break; + } + } + bool eofReached = _packetQueue.isEmpty(); + + startPacket(); + + int got_frame = 0; + int decoded = 0; + auto packet = &_packetNull; + if (!_packetQueue.isEmpty()) { + packet = &_packetQueue.head(); + decoded = packet->size; + } + + int res = 0; + if ((res = avcodec_decode_video2(_codecContext, _frame, &got_frame, packet)) < 0) { + char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; + LOG(("Gif Error: Unable to avcodec_decode_video2() %1, error %2, %3").arg(logData()).arg(res).arg(av_make_error_string(err, sizeof(err), res))); + + if (res == AVERROR_INVALIDDATA) { // try to skip bad packet + finishPacket(); + continue; + } + + eofReached = (res == AVERROR_EOF); + if (!eofReached || !_hadFrame) { // try to skip end of file return false; } } + if (res > 0) decoded = res; - bool finished = (res < 0); - if (finished) { - _avpkt.data = NULL; - _avpkt.size = 0; - } else { - rememberPacket(); - } - - int32 got_frame = 0; - int32 decoded = _avpkt.size; - if (_avpkt.stream_index == _streamId) { - if ((res = avcodec_decode_video2(_codecContext, _frame, &got_frame, &_avpkt)) < 0) { - char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; - LOG(("Gif Error: Unable to avcodec_decode_video2() %1, error %2, %3").arg(logData()).arg(res).arg(av_make_error_string(err, sizeof(err), res))); - - if (res == AVERROR_INVALIDDATA) { // try to skip bad packet - freePacket(); - _avpkt.data = NULL; - _avpkt.size = 0; - continue; - } - - if (res != AVERROR_EOF || !_hadFrame) { // try to skip end of file - return false; - } - freePacket(); - 
_avpkt.data = NULL; - _avpkt.size = 0; - continue; + if (!_packetQueue.isEmpty()) { + packet->data += decoded; + packet->size -= decoded; + if (packet->size <= 0) { + finishPacket(); } - if (res > 0) decoded = res; - } else if (_audioStreamId >= 0 && _avpkt.stream_index == _audioStreamId) { - freePacket(); - continue; - } - if (!finished) { - _avpkt.data += decoded; - _avpkt.size -= decoded; - if (_avpkt.size <= 0) freePacket(); } if (got_frame) { @@ -110,7 +103,8 @@ bool FFMpegReaderImplementation::readNextFrame() { return true; } - if (finished) { + if (eofReached) { + clearPacketQueue(); if ((res = avformat_seek_file(_fmtContext, _streamId, std::numeric_limits::min(), 0, std::numeric_limits::max(), 0)) < 0) { if ((res = av_seek_frame(_fmtContext, _streamId, 0, AVSEEK_FLAG_BYTE)) < 0) { if ((res = av_seek_frame(_fmtContext, _streamId, 0, AVSEEK_FLAG_FRAME)) < 0) { @@ -176,7 +170,7 @@ int FFMpegReaderImplementation::nextFrameDelay() { return _currentFrameDelay; } -bool FFMpegReaderImplementation::start(bool onlyGifv) { +bool FFMpegReaderImplementation::start(Mode mode) { initDevice(); if (!_device->open(QIODevice::ReadOnly)) { LOG(("Gif Error: Unable to open device %1").arg(logData())); @@ -211,13 +205,14 @@ bool FFMpegReaderImplementation::start(bool onlyGifv) { LOG(("Gif Error: Unable to av_find_best_stream %1, error %2, %3").arg(logData()).arg(_streamId).arg(av_make_error_string(err, sizeof(err), _streamId))); return false; } + _packetNull.stream_index = _streamId; // Get a pointer to the codec context for the audio stream _codecContext = _fmtContext->streams[_streamId]->codec; _codec = avcodec_find_decoder(_codecContext->codec_id); _audioStreamId = av_find_best_stream(_fmtContext, AVMEDIA_TYPE_AUDIO, -1, -1, 0, 0); - if (onlyGifv) { + if (mode == Mode::OnlyGifv) { if (_audioStreamId >= 0) { // should be no audio stream return false; } @@ -227,6 +222,8 @@ bool FFMpegReaderImplementation::start(bool onlyGifv) { if (_codecContext->codec_id != AV_CODEC_ID_H264) { return false; } + } else if (mode == Mode::Silent) { + _audioStreamId = -1; } av_opt_set_int(_codecContext, "refcounted_frames", 1, 0); if ((res = avcodec_open2(_codecContext, _codec, 0)) < 0) { @@ -261,23 +258,75 @@ FFMpegReaderImplementation::~FFMpegReaderImplementation() { } if (_fmtContext) avformat_free_context(_fmtContext); av_frame_free(&_frame); - freePacket(); + + clearPacketQueue(); } -void FFMpegReaderImplementation::rememberPacket() { - if (!_packetWas) { - _packetSize = _avpkt.size; - _packetData = _avpkt.data; - _packetWas = true; +FFMpegReaderImplementation::PacketResult FFMpegReaderImplementation::readPacket() { + AVPacket packet; + av_init_packet(&packet); + packet.data = nullptr; + packet.size = 0; + + int res = 0; + if ((res = av_read_frame(_fmtContext, &packet)) < 0) { + if (res == AVERROR_EOF) { + return PacketResult::EndOfFile; + } + char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; + LOG(("Gif Error: Unable to av_read_frame() %1, error %2, %3").arg(logData()).arg(res).arg(av_make_error_string(err, sizeof(err), res))); + return PacketResult::Error; + } + + bool videoPacket = (packet.stream_index == _streamId); + bool audioPacket = (_audioStreamId >= 0 && packet.stream_index == _audioStreamId); + if (audioPacket || videoPacket) { + //AVPacket packetForQueue; + //av_init_packet(&packetForQueue); + //if ((res = av_packet_ref(&packetForQueue, &packet)) < 0) { + // char err[AV_ERROR_MAX_STRING_SIZE] = { 0 }; + // LOG(("Gif Error: Unable to av_packet_ref() %1, error %2, 
%3").arg(logData()).arg(res).arg(av_make_error_string(err, sizeof(err), res))); + // return PacketResult::Error; + //} + + if (videoPacket) { + _packetQueue.enqueue(packet); + //_packetQueue.enqueue(packetForQueue); + } else if (audioPacket) { + // queue packet to audio player + // audioPlayer()->enqueuePacket(packet, &isEnough) + //av_packet_unref(&packetForQueue); + av_packet_unref(&packet); + } + } else { + av_packet_unref(&packet); + } + //av_packet_unref(&packet); + return PacketResult::Ok; +} + +void FFMpegReaderImplementation::startPacket() { + if (!_packetStarted && !_packetQueue.isEmpty()) { + _packetStartedSize = _packetQueue.head().size; + _packetStartedData = _packetQueue.head().data; + _packetStarted = true; } } -void FFMpegReaderImplementation::freePacket() { - if (_packetWas) { - _avpkt.size = _packetSize; - _avpkt.data = _packetData; - _packetWas = false; - av_packet_unref(&_avpkt); +void FFMpegReaderImplementation::finishPacket() { + if (_packetStarted) { + _packetQueue.head().size = _packetStartedSize; + _packetQueue.head().data = _packetStartedData; + _packetStarted = false; + av_packet_unref(&_packetQueue.dequeue()); + } +} + +void FFMpegReaderImplementation::clearPacketQueue() { + finishPacket(); + auto packets = createAndSwap(_packetQueue); + for (auto &packet : packets) { + av_packet_unref(&packet); } } diff --git a/Telegram/SourceFiles/media/media_clip_ffmpeg.h b/Telegram/SourceFiles/media/media_clip_ffmpeg.h index 3850ebf494..78c5482825 100644 --- a/Telegram/SourceFiles/media/media_clip_ffmpeg.h +++ b/Telegram/SourceFiles/media/media_clip_ffmpeg.h @@ -41,7 +41,7 @@ public: bool readNextFrame() override; bool renderFrame(QImage &to, bool &hasAlpha, const QSize &size) override; int nextFrameDelay() override; - bool start(bool onlyGifv) override; + bool start(Mode mode) override; int duration() const; QString logData() const; @@ -49,8 +49,15 @@ public: ~FFMpegReaderImplementation(); private: - void rememberPacket(); - void freePacket(); + enum class PacketResult { + Ok, + EndOfFile, + Error, + }; + PacketResult readPacket(); + void startPacket(); + void finishPacket(); + void clearPacketQueue(); static int _read(void *opaque, uint8_t *buf, int buf_size); static int64_t _seek(void *opaque, int64_t offset, int whence); @@ -68,10 +75,11 @@ private: int _audioStreamId = 0; - AVPacket _avpkt; - int _packetSize = 0; - uint8_t *_packetData = nullptr; - bool _packetWas = false; + QQueue _packetQueue; + AVPacket _packetNull; // for final decoding + int _packetStartedSize = 0; + uint8_t *_packetStartedData = nullptr; + bool _packetStarted = false; int _width = 0; int _height = 0; diff --git a/Telegram/SourceFiles/media/media_clip_implementation.h b/Telegram/SourceFiles/media/media_clip_implementation.h index 30f34c4bc2..c54fc4b7a3 100644 --- a/Telegram/SourceFiles/media/media_clip_implementation.h +++ b/Telegram/SourceFiles/media/media_clip_implementation.h @@ -33,10 +33,15 @@ public: : _location(location) , _data(data) { } + enum class Mode { + OnlyGifv, + Silent, + Normal, + }; virtual bool readNextFrame() = 0; virtual bool renderFrame(QImage &to, bool &hasAlpha, const QSize &size) = 0; virtual int nextFrameDelay() = 0; - virtual bool start(bool onlyGifv) = 0; + virtual bool start(Mode mode) = 0; virtual ~ReaderImplementation() { } int64 dataSize() const { diff --git a/Telegram/SourceFiles/media/media_clip_qtgif.cpp b/Telegram/SourceFiles/media/media_clip_qtgif.cpp index 2ec76997c0..ff911c9724 100644 --- a/Telegram/SourceFiles/media/media_clip_qtgif.cpp +++ 
b/Telegram/SourceFiles/media/media_clip_qtgif.cpp @@ -70,8 +70,8 @@ int QtGifReaderImplementation::nextFrameDelay() { return _frameDelay; } -bool QtGifReaderImplementation::start(bool onlyGifv) { - if (onlyGifv) return false; +bool QtGifReaderImplementation::start(Mode mode) { + if (mode == Mode::OnlyGifv) return false; return jumpToStart(); } diff --git a/Telegram/SourceFiles/media/media_clip_qtgif.h b/Telegram/SourceFiles/media/media_clip_qtgif.h index 5e8efa0638..910d60e40a 100644 --- a/Telegram/SourceFiles/media/media_clip_qtgif.h +++ b/Telegram/SourceFiles/media/media_clip_qtgif.h @@ -34,7 +34,7 @@ public: bool readNextFrame() override; bool renderFrame(QImage &to, bool &hasAlpha, const QSize &size) override; int nextFrameDelay() override; - bool start(bool onlyGifv) override; + bool start(Mode mode) override; ~QtGifReaderImplementation(); diff --git a/Telegram/SourceFiles/media/media_clip_reader.cpp b/Telegram/SourceFiles/media/media_clip_reader.cpp index ca96eb4187..500275a5a5 100644 --- a/Telegram/SourceFiles/media/media_clip_reader.cpp +++ b/Telegram/SourceFiles/media/media_clip_reader.cpp @@ -287,10 +287,10 @@ Reader::~Reader() { class ReaderPrivate { public: - ReaderPrivate(Reader *reader, const FileLocation &location, const QByteArray &data) : _interface(reader) - , _data(data) - , _location(_data.isEmpty() ? new FileLocation(location) : 0) { + , _mode(reader->mode()) + , _data(data) + , _location(_data.isEmpty() ? new FileLocation(location) : 0) { if (_data.isEmpty() && !_location->accessEnable()) { error(); return; @@ -381,9 +381,17 @@ public: } } - _implementation = new internal::FFMpegReaderImplementation(_location, &_data); + _implementation = std_::make_unique(_location, &_data); // _implementation = new QtGifReaderImplementation(_location, &_data); - return _implementation->start(false); + + auto implementationMode = [this]() { + using ImplementationMode = internal::ReaderImplementation::Mode; + if (_mode == Reader::Mode::Gif) { + return ImplementationMode::Silent; + } + return ImplementationMode::Normal; + }; + return _implementation->start(implementationMode()); } ProcessResult error() { @@ -393,8 +401,7 @@ public: } void stop() { - delete _implementation; - _implementation = 0; + _implementation = nullptr; if (_location) { if (_accessed) { @@ -409,21 +416,20 @@ public: ~ReaderPrivate() { stop(); deleteAndMark(_location); - deleteAndMark(_implementation); _data.clear(); } private: - Reader *_interface; State _state = State::Reading; + Reader::Mode _mode; QByteArray _data; FileLocation *_location; bool _accessed = false; QBuffer _buffer; - internal::ReaderImplementation *_implementation = nullptr; + std_::unique_ptr _implementation; FrameRequest _request; struct Frame { @@ -474,7 +480,7 @@ void Manager::start(Reader *reader) { void Manager::update(Reader *reader) { QReadLocker lock(&_readerPointersMutex); - ReaderPointers::const_iterator i = _readerPointers.constFind(reader); + auto i = _readerPointers.constFind(reader); if (i == _readerPointers.cend()) { lock.unlock(); @@ -615,9 +621,9 @@ void Manager::process() { uint64 ms = getms(), minms = ms + 86400 * 1000ULL; { QReadLocker lock(&_readerPointersMutex); - for (ReaderPointers::iterator it = _readerPointers.begin(), e = _readerPointers.end(); it != e; ++it) { + for (auto it = _readerPointers.begin(), e = _readerPointers.end(); it != e; ++it) { if (it->v.loadAcquire()) { - Readers::iterator i = _readers.find(it.key()->_private); + auto i = _readers.find(it.key()->_private); if (i == _readers.cend()) { 
_readers.insert(it.key()->_private, 0); } else { @@ -633,7 +639,7 @@ void Manager::process() { } } - for (Readers::iterator i = _readers.begin(), e = _readers.end(); i != e;) { + for (auto i = _readers.begin(), e = _readers.end(); i != e;) { ReaderPrivate *reader = i.key(); if (i.value() <= ms) { ResultHandleState state = handleResult(reader, reader->process(ms), ms); @@ -693,7 +699,7 @@ MTPDocumentAttribute readAttributes(const QString &fname, const QByteArray &data QByteArray localdata(data); auto reader = std_::make_unique(&localloc, &localdata); - if (reader->start(true)) { + if (reader->start(internal::ReaderImplementation::Mode::OnlyGifv)) { bool hasAlpha = false; if (reader->readNextFrame() && reader->renderFrame(cover, hasAlpha, QSize())) { if (cover.width() > 0 && cover.height() > 0 && cover.width() < cover.height() * 10 && cover.height() < cover.width() * 10) { diff --git a/Telegram/SourceFiles/media/media_clip_reader.h b/Telegram/SourceFiles/media/media_clip_reader.h index a0b160e705..43b2768ead 100644 --- a/Telegram/SourceFiles/media/media_clip_reader.h +++ b/Telegram/SourceFiles/media/media_clip_reader.h @@ -101,6 +101,10 @@ public: void stop(); void error(); + Mode mode() const { + return _mode; + } + ~Reader(); private: diff --git a/Telegram/SourceFiles/mediaview.cpp b/Telegram/SourceFiles/mediaview.cpp index ba722e41d1..1f5a9d073d 100644 --- a/Telegram/SourceFiles/mediaview.cpp +++ b/Telegram/SourceFiles/mediaview.cpp @@ -1087,7 +1087,8 @@ void MediaView::displayDocument(DocumentData *doc, HistoryItem *item) { // empty if (_doc->dimensions.width() && _doc->dimensions.height()) { _current = _doc->thumb->pixNoCache(_doc->dimensions.width(), _doc->dimensions.height(), ImagePixSmooth | ImagePixBlurred, _doc->dimensions.width(), _doc->dimensions.height()); } - _gif = new Media::Clip::Reader(location, _doc->data(), func(this, &MediaView::clipCallback)); + auto mode = _doc->isVideo() ? Media::Clip::Reader::Mode::Video : Media::Clip::Reader::Mode::Gif; + _gif = new Media::Clip::Reader(location, _doc->data(), func(this, &MediaView::clipCallback), mode); } } else { if (QImageReader(location.name()).canRead()) {
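
For reference, the packet-queue pattern this commit introduces (demux into a QQueue<AVPacket>, decode from the head of the queue, remember the head packet's original data/size so a partially consumed packet can still be freed correctly, and flush the decoder with a null packet at end of file) can be sketched roughly as below. The sketch is standalone and illustrative only: the class name PacketQueueDecoder and its members are hypothetical and merely mirror the names used in the patch, and it assumes the same generation of the FFmpeg API the patch targets (av_init_packet / avcodec_decode_video2).

extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
}
#include <QQueue>

// Keeps demuxed video packets in a queue and decodes from the head,
// restoring the head packet's original data/size before unref so that
// partially consumed packets are freed from the correct buffer start.
class PacketQueueDecoder {
public:
	PacketQueueDecoder(AVFormatContext *format, AVCodecContext *codec, int videoStreamId)
	: _format(format), _codec(codec), _streamId(videoStreamId) {
		av_init_packet(&_packetNull); // empty packet used to flush the decoder at EOF
		_packetNull.data = nullptr;
		_packetNull.size = 0;
		_packetNull.stream_index = _streamId;
	}

	~PacketQueueDecoder() { // drain the queue, freeing every remaining packet
		finishPacket();
		while (!_queue.isEmpty()) {
			auto packet = _queue.dequeue();
			av_packet_unref(&packet);
		}
	}

	// Reads one packet from the demuxer; video packets go to the queue,
	// anything else (e.g. audio) is dropped here. Returns false at EOF or error.
	bool readPacket() {
		AVPacket packet;
		av_init_packet(&packet);
		packet.data = nullptr;
		packet.size = 0;
		if (av_read_frame(_format, &packet) < 0) {
			return false;
		}
		if (packet.stream_index == _streamId) {
			_queue.enqueue(packet); // the queue now owns the packet's buffer
		} else {
			av_packet_unref(&packet);
		}
		return true;
	}

	// Decodes until one frame is produced; returns false at end of stream or error.
	bool decodeFrame(AVFrame *frame) {
		while (true) {
			while (_queue.isEmpty()) {
				if (!readPacket()) break; // EOF: flush with _packetNull below
			}
			startPacket();

			auto packet = _queue.isEmpty() ? &_packetNull : &_queue.head();
			int gotFrame = 0;
			int res = avcodec_decode_video2(_codec, frame, &gotFrame, packet);
			if (res < 0) {
				finishPacket();
				if (res == AVERROR_INVALIDDATA) continue; // skip a broken packet
				return false;
			}
			if (!_queue.isEmpty()) {
				// avcodec_decode_video2() may consume the packet only partially:
				// advance the head packet and dequeue it once fully consumed.
				packet->data += res;
				packet->size -= res;
				if (packet->size <= 0) finishPacket();
			}
			if (gotFrame) return true;
			if (_queue.isEmpty()) return false; // nothing left to flush at EOF
		}
	}

private:
	void startPacket() { // remember the head packet's original data/size
		if (!_packetStarted && !_queue.isEmpty()) {
			_packetStartedData = _queue.head().data;
			_packetStartedSize = _queue.head().size;
			_packetStarted = true;
		}
	}
	void finishPacket() { // restore the original pointers, then free and dequeue
		if (_packetStarted) {
			_queue.head().data = _packetStartedData;
			_queue.head().size = _packetStartedSize;
			_packetStarted = false;
			auto packet = _queue.dequeue();
			av_packet_unref(&packet);
		}
	}

	AVFormatContext *_format = nullptr;
	AVCodecContext *_codec = nullptr;
	int _streamId = -1;
	QQueue<AVPacket> _queue;
	AVPacket _packetNull;
	uint8_t *_packetStartedData = nullptr;
	int _packetStartedSize = 0;
	bool _packetStarted = false;
};

The point of the queue, compared with the previous single in-flight _avpkt, is that demuxing and decoding are decoupled: video packets can accumulate while non-video packets are filtered out at read time, and (as the commented-out av_packet_ref code in the patch hints) audio packets could later be routed to an audio player instead of being silently skipped inside the decode loop.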