/*
    Coder : Dzlua
    Email : 505544956@qq.com
    Time  : 2017/07/04
*/
#include "decoder.h"

extern "C" {

#include <ffmpeg/libavformat/avformat.h>
#include <ffmpeg/libavcodec/avcodec.h>
#include <ffmpeg/libswscale/swscale.h>
#include <ffmpeg/libswresample/swresample.h>
#include <ffmpeg/libavutil/adler32.h>
#include <ffmpeg/libavutil/imgutils.h>
#include <ffmpeg/libavutil/mathematics.h>
#include <ffmpeg/libavutil/avassert.h>

}

namespace dzlua {
namespace av {

namespace {

// Lifecycle states for the demux/decode worker threads
// (held in DecoderImpl::es_pkg_ / es_video_ / es_audio_ / es_subtitle_).
enum class eState : uint8_t {
    null, nused, run, pause,   // null: closed/uninitialized; nused: stream absent or disabled; run/pause: thread control
    cont, seek, done, stop     // cont: resume; seek: seek requested; done: finished decoding; stop: thread must exit
};

/*
*    Video conversion parameters, expressed in ffmpeg types
*    (internal mirror of the public VideoInfo).
*/
struct VideoInfoImpl {
    AVPixelFormat format;  // target pixel format for sws conversion
    int width;             // target frame width in pixels
    int height;            // target frame height in pixels
    int algorithm;         // SWS_* scaling algorithm flag
};

/*
*    Audio conversion parameters, expressed in ffmpeg types
*    (internal mirror of the public AudioInfo).
*/
struct AudioInfoImpl {
    int64_t layout;         // AV_CH_LAYOUT_* channel-layout mask
    AVSampleFormat format;  // target sample format for swr conversion
    int rate;               // sample rate in Hz
    int channels;           // channel count
    int samples;            // samples per frame (decoder frame_size)
};

/*
*    Subtitle infos.
*    Placeholder: subtitle conversion parameters are not implemented yet.
*/
struct SubtitleInfoImpl {};

//------------------------//
// Map ePixelFormat to the corresponding ffmpeg AVPixelFormat.
// Any unhandled value falls back to AV_PIX_FMT_YUV420P.
AVPixelFormat dc_ts_pixel_format(ePixelFormat epfmt) {
    switch (epfmt) {
        case ePixelFormat::rgb24: return AV_PIX_FMT_RGB24;
        case ePixelFormat::bgr24: return AV_PIX_FMT_BGR24;

        case ePixelFormat::argb: return AV_PIX_FMT_ARGB;
        case ePixelFormat::rgba: return AV_PIX_FMT_RGBA;
        case ePixelFormat::abgr: return AV_PIX_FMT_ABGR;
        case ePixelFormat::bgra: return AV_PIX_FMT_BGRA;

        case ePixelFormat::yuv420p:
        default: return AV_PIX_FMT_YUV420P;
    }
}

// Map an ffmpeg AVPixelFormat back to ePixelFormat.
// Unsupported formats map to ePixelFormat::unknown.
ePixelFormat dc_ts_pixel_format(AVPixelFormat apfmt) {
    switch (apfmt) {
        case AV_PIX_FMT_YUV420P: return ePixelFormat::yuv420p;
        case AV_PIX_FMT_RGB24: return ePixelFormat::rgb24;
        case AV_PIX_FMT_BGR24: return ePixelFormat::bgr24;

        case AV_PIX_FMT_ARGB: return ePixelFormat::argb;
        case AV_PIX_FMT_RGBA: return ePixelFormat::rgba;
        case AV_PIX_FMT_ABGR: return ePixelFormat::abgr;
        case AV_PIX_FMT_BGRA: return ePixelFormat::bgra;

        default: return ePixelFormat::unknown;
    }
}

// Map eAlgorithm to the corresponding SWS_* scaling flag.
// Any unhandled value falls back to SWS_BICUBIC.
int dc_ts_algorithm(eAlgorithm ealg) {
    switch (ealg) {
        case eAlgorithm::fast_bilinear: return SWS_FAST_BILINEAR;
        case eAlgorithm::bilinear: return SWS_BILINEAR;
        case eAlgorithm::x: return SWS_X;
        case eAlgorithm::point: return SWS_POINT;
        case eAlgorithm::area: return SWS_AREA;
        case eAlgorithm::bicublin: return SWS_BICUBLIN;
        case eAlgorithm::gauss: return SWS_GAUSS;
        case eAlgorithm::sinc: return SWS_SINC;
        case eAlgorithm::lanczos: return SWS_LANCZOS;
        case eAlgorithm::spline: return SWS_SPLINE;

        case eAlgorithm::bicubic:
        default: return SWS_BICUBIC;
    }
}

// Map an SWS_* scaling flag back to eAlgorithm.
// Unrecognized flags map to eAlgorithm::bicubic.
eAlgorithm dc_ts_algorithm(int alg) {
    switch (alg) {
        case SWS_FAST_BILINEAR: return eAlgorithm::fast_bilinear;
        case SWS_BILINEAR: return eAlgorithm::bilinear;
        case SWS_X: return eAlgorithm::x;
        case SWS_POINT: return eAlgorithm::point;
        case SWS_AREA: return eAlgorithm::area;
        case SWS_BICUBLIN: return eAlgorithm::bicublin;
        case SWS_GAUSS: return eAlgorithm::gauss;
        case SWS_SINC: return eAlgorithm::sinc;
        case SWS_LANCZOS: return eAlgorithm::lanczos;
        case SWS_SPLINE: return eAlgorithm::spline;

        case SWS_BICUBIC:
        default: return eAlgorithm::bicubic;
    }
}

// from eChannelLayout to ffmpeg channel layout.
int64_t dc_ts_channel_layout(const eChannelLayout ecl) {
    int64_t layout = AV_CH_LAYOUT_NATIVE;
    switch (ecl) {
        case eChannelLayout::native: layout = AV_CH_LAYOUT_NATIVE; break;
        case eChannelLayout::mono: layout = AV_CH_LAYOUT_MONO; break;
        case eChannelLayout::stereo: layout = AV_CH_LAYOUT_STEREO; break;
        case eChannelLayout::_2point1: layout = AV_CH_LAYOUT_2POINT1; break;
        case eChannelLayout::_2_1: layout = AV_CH_LAYOUT_2_1; break;
        case eChannelLayout::surround: layout = AV_CH_LAYOUT_SURROUND; break;
        case eChannelLayout::_3point1: layout = AV_CH_LAYOUT_3POINT1; break;
        case eChannelLayout::_4point0: layout = AV_CH_LAYOUT_4POINT0; break;
        case eChannelLayout::_4point1: layout = AV_CH_LAYOUT_4POINT1; break;
        case eChannelLayout::_2_2: layout = AV_CH_LAYOUT_2_2; break;
        case eChannelLayout::quad: layout = AV_CH_LAYOUT_QUAD; break;
        case eChannelLayout::_5point0: layout = AV_CH_LAYOUT_5POINT0; break;
        case eChannelLayout::_5point1: layout = AV_CH_LAYOUT_5POINT1; break;
        case eChannelLayout::_5point0_back: layout = AV_CH_LAYOUT_5POINT0_BACK; break;
        case eChannelLayout::_5point1_back: layout = AV_CH_LAYOUT_5POINT1_BACK; break;
        case eChannelLayout::_6point0: layout = AV_CH_LAYOUT_6POINT0; break;
        case eChannelLayout::_6point0_front: layout = AV_CH_LAYOUT_6POINT0_FRONT; break;
        case eChannelLayout::hexagonal: layout = AV_CH_LAYOUT_HEXAGONAL; break;
        case eChannelLayout::_6point1: layout = AV_CH_LAYOUT_6POINT1; break;
        case eChannelLayout::_6point1_back: layout = AV_CH_LAYOUT_6POINT1_BACK; break;
        case eChannelLayout::_6point1_front: layout = AV_CH_LAYOUT_6POINT1_FRONT; break;
        case eChannelLayout::_7point0: layout = AV_CH_LAYOUT_7POINT0; break;
        case eChannelLayout::_7point0_front: layout = AV_CH_LAYOUT_7POINT0_FRONT; break;
        case eChannelLayout::_7point1: layout = AV_CH_LAYOUT_7POINT1; break;
        case eChannelLayout::_7point1_wide: layout = AV_CH_LAYOUT_7POINT1_WIDE; break;
        case eChannelLayout::_7point1_wide_back: layout = AV_CH_LAYOUT_7POINT1_WIDE_BACK; break;
        case eChannelLayout::octagonal: layout = AV_CH_LAYOUT_OCTAGONAL; break;
        case eChannelLayout::hexadecagonal: layout = AV_CH_LAYOUT_HEXADECAGONAL; break;
        case eChannelLayout::stereo_downmix: layout = AV_CH_LAYOUT_STEREO_DOWNMIX; break;
    }
    return layout;
}

// Map an ffmpeg AV_CH_LAYOUT_* mask back to eChannelLayout.
// Unrecognized masks map to eChannelLayout::native.
eChannelLayout dc_ts_channel_layout(const int64_t layout) {
    switch (layout) {
        case AV_CH_LAYOUT_NATIVE: return eChannelLayout::native;
        case AV_CH_LAYOUT_MONO: return eChannelLayout::mono;
        case AV_CH_LAYOUT_STEREO: return eChannelLayout::stereo;
        case AV_CH_LAYOUT_2POINT1: return eChannelLayout::_2point1;
        case AV_CH_LAYOUT_2_1: return eChannelLayout::_2_1;
        case AV_CH_LAYOUT_SURROUND: return eChannelLayout::surround;
        case AV_CH_LAYOUT_3POINT1: return eChannelLayout::_3point1;
        case AV_CH_LAYOUT_4POINT0: return eChannelLayout::_4point0;
        case AV_CH_LAYOUT_4POINT1: return eChannelLayout::_4point1;
        case AV_CH_LAYOUT_2_2: return eChannelLayout::_2_2;
        case AV_CH_LAYOUT_QUAD: return eChannelLayout::quad;
        case AV_CH_LAYOUT_5POINT0: return eChannelLayout::_5point0;
        case AV_CH_LAYOUT_5POINT1: return eChannelLayout::_5point1;
        case AV_CH_LAYOUT_5POINT0_BACK: return eChannelLayout::_5point0_back;
        case AV_CH_LAYOUT_5POINT1_BACK: return eChannelLayout::_5point1_back;
        case AV_CH_LAYOUT_6POINT0: return eChannelLayout::_6point0;
        case AV_CH_LAYOUT_6POINT0_FRONT: return eChannelLayout::_6point0_front;
        case AV_CH_LAYOUT_HEXAGONAL: return eChannelLayout::hexagonal;
        case AV_CH_LAYOUT_6POINT1: return eChannelLayout::_6point1;
        case AV_CH_LAYOUT_6POINT1_BACK: return eChannelLayout::_6point1_back;
        case AV_CH_LAYOUT_6POINT1_FRONT: return eChannelLayout::_6point1_front;
        case AV_CH_LAYOUT_7POINT0: return eChannelLayout::_7point0;
        case AV_CH_LAYOUT_7POINT0_FRONT: return eChannelLayout::_7point0_front;
        case AV_CH_LAYOUT_7POINT1: return eChannelLayout::_7point1;
        case AV_CH_LAYOUT_7POINT1_WIDE: return eChannelLayout::_7point1_wide;
        case AV_CH_LAYOUT_7POINT1_WIDE_BACK: return eChannelLayout::_7point1_wide_back;
        case AV_CH_LAYOUT_OCTAGONAL: return eChannelLayout::octagonal;
        case AV_CH_LAYOUT_HEXADECAGONAL: return eChannelLayout::hexadecagonal;
        case AV_CH_LAYOUT_STEREO_DOWNMIX: return eChannelLayout::stereo_downmix;
        default: return eChannelLayout::native;
    }
}

//-----------------//
// Map eSampleFormat to the corresponding ffmpeg AVSampleFormat.
// (The original comment said eChannelLayout; the input is eSampleFormat.)
// Any unhandled value maps to AV_SAMPLE_FMT_NONE.
AVSampleFormat dc_ts_sample_format(const eSampleFormat esf) {
    switch (esf) {
        // interleaved formats
        case eSampleFormat::u8:  return AV_SAMPLE_FMT_U8;
        case eSampleFormat::s16: return AV_SAMPLE_FMT_S16;
        case eSampleFormat::s32: return AV_SAMPLE_FMT_S32;
        case eSampleFormat::flt: return AV_SAMPLE_FMT_FLT;
        case eSampleFormat::dbl: return AV_SAMPLE_FMT_DBL;
        case eSampleFormat::s64: return AV_SAMPLE_FMT_S64;

        // planar formats
        case eSampleFormat::u8p:  return AV_SAMPLE_FMT_U8P;
        case eSampleFormat::s16p: return AV_SAMPLE_FMT_S16P;
        case eSampleFormat::s32p: return AV_SAMPLE_FMT_S32P;
        case eSampleFormat::fltp: return AV_SAMPLE_FMT_FLTP;
        case eSampleFormat::dblp: return AV_SAMPLE_FMT_DBLP;
        case eSampleFormat::s64p: return AV_SAMPLE_FMT_S64P;

        default: return AV_SAMPLE_FMT_NONE;
    }
}

// Map an ffmpeg AVSampleFormat back to eSampleFormat.
// Unsupported formats map to eSampleFormat::none.
eSampleFormat dc_ts_sample_format(const AVSampleFormat asf) {
    switch (asf) {
        // interleaved formats
        case AV_SAMPLE_FMT_U8 : return eSampleFormat::u8;
        case AV_SAMPLE_FMT_S16: return eSampleFormat::s16;
        case AV_SAMPLE_FMT_S32: return eSampleFormat::s32;
        case AV_SAMPLE_FMT_FLT: return eSampleFormat::flt;
        case AV_SAMPLE_FMT_DBL: return eSampleFormat::dbl;
        case AV_SAMPLE_FMT_S64: return eSampleFormat::s64;

        // planar formats
        case AV_SAMPLE_FMT_U8P : return eSampleFormat::u8p;
        case AV_SAMPLE_FMT_S16P: return eSampleFormat::s16p;
        case AV_SAMPLE_FMT_S32P: return eSampleFormat::s32p;
        case AV_SAMPLE_FMT_FLTP: return eSampleFormat::fltp;
        case AV_SAMPLE_FMT_DBLP: return eSampleFormat::dblp;
        case AV_SAMPLE_FMT_S64P: return eSampleFormat::s64p;

        default: return eSampleFormat::none;
    }
}
//-----------------//
// Convert the internal VideoInfoImpl (ffmpeg types) to the public VideoInfo.
// Returns nullptr when info is null.
std::shared_ptr<VideoInfo> dc_ts_info(const VideoInfoImpl *info) {
    if (!info) return nullptr;
    // std::make_shared throws std::bad_alloc on failure and never returns
    // null, so the original post-allocation null check was dead code.
    auto dat = std::make_shared<VideoInfo>();
    dat->format = dc_ts_pixel_format(info->format);
    dat->algorithm = dc_ts_algorithm(info->algorithm);
    dat->width = info->width;
    dat->height = info->height;
    return dat;
}

// Convert the internal AudioInfoImpl (ffmpeg types) to the public AudioInfo.
// Returns nullptr when info is null.
std::shared_ptr<AudioInfo> dc_ts_info(const AudioInfoImpl *info) {
    if (!info) return nullptr;
    // std::make_shared never returns null (it throws), so no null check.
    auto dat = std::make_shared<AudioInfo>();
    dat->layout = dc_ts_channel_layout(info->layout);
    dat->format = dc_ts_sample_format(info->format);
    dat->rate = info->rate;
    dat->channels = info->channels;
    dat->samples = info->samples;
    return dat;
}

// Convert the internal SubtitleInfoImpl to the public SubtitleInfo.
// Both types are currently empty placeholders; only the null-ness of
// `info` carries information. Returns nullptr when info is null.
std::shared_ptr<SubtitleInfo> dc_ts_info(const SubtitleInfoImpl *info) {
    if (!info) return nullptr;
    // std::make_shared never returns null (it throws), so no null check.
    return std::make_shared<SubtitleInfo>();
}

// Convert the public VideoInfo to the internal VideoInfoImpl (ffmpeg types).
// Returns nullptr when info is null.
std::shared_ptr<VideoInfoImpl> dc_ts_info(const VideoInfo *info) {
    if (!info) return nullptr;
    // std::make_shared never returns null (it throws), so no null check.
    auto dat = std::make_shared<VideoInfoImpl>();
    dat->format = dc_ts_pixel_format(info->format);
    dat->algorithm = dc_ts_algorithm(info->algorithm);
    dat->width = info->width;
    dat->height = info->height;
    return dat;
}

// Convert the public AudioInfo to the internal AudioInfoImpl (ffmpeg types).
// Returns nullptr when info is null.
std::shared_ptr<AudioInfoImpl> dc_ts_info(const AudioInfo *info) {
    if (!info) return nullptr;
    // std::make_shared never returns null (it throws), so no null check.
    auto dat = std::make_shared<AudioInfoImpl>();
    dat->layout = dc_ts_channel_layout(info->layout);
    dat->format = dc_ts_sample_format(info->format);
    dat->rate = info->rate;
    dat->channels = info->channels;
    dat->samples = info->samples;
    return dat;
}

// Convert the public SubtitleInfo to the internal SubtitleInfoImpl.
// Both types are currently empty placeholders. Returns nullptr when
// info is null.
std::shared_ptr<SubtitleInfoImpl> dc_ts_info(const SubtitleInfo *info) {
    if (!info) return nullptr;
    // std::make_shared never returns null (it throws), so no null check.
    return std::make_shared<SubtitleInfoImpl>();
}

} // end namespace

//----------------------//
// Build an empty audio buffer descriptor from the requested output format.
// Buffer pointer/sizes and timestamps start zeroed; the format fields are
// copied from the (required) info.
AudioData::AudioData(const AudioInfo *info)
        : data(nullptr)
        , linesize(0)
        , size(0)
        , pts(0)
        , time(0) {
    av_assert0(info);  // a null info is a programming error, abort loudly
    layout = info->layout;
    format = info->format;
    rate = info->rate;
    channels = info->channels;
    samples = info->samples;
}

// Build an empty video frame descriptor from the requested output format.
// Plane pointers/linesizes are zeroed here; the actual image buffers are
// allocated later (see av_image_alloc in the decode path).
VideoData::VideoData(const VideoInfo *info)
        : pts(0)
        , time(0) {
    av_assert0(info);  // a null info is a programming error, abort loudly
    format = info->format;
    algorithm = info->algorithm;
    width = info->width;
    height = info->height;
    
    // Zero all 4 plane slots (ffmpeg-style data/linesize arrays;
    // assumes the arrays hold at least 4 entries -- declared in decoder.h).
    for (size_t i = 0; i < 4; ++i) {
        data[i] = nullptr;
        linesize[i] = 0;
    }
}

//------------------------------//
// Forward a message to ffmpeg's logger at INFO level.
// BUGFIX: the message is now passed as an argument to a "%s" format
// string; the original passed msg.c_str() as the format itself, which is
// undefined behavior whenever the message contains a '%' character.
void log(const std::string msg) {
    av_log(nullptr, AV_LOG_INFO, "%s", msg.c_str());
}

//----------------------//
// Concrete Decoder implementation.
// One thread demuxes packets from the container into per-stream packet
// queues; three more threads decode video/audio/subtitle packets into the
// per-stream data queues that clients drain via pop_*_data().
class DecoderImpl
        : public Decoder {
public:
    DecoderImpl();
    virtual ~DecoderImpl();
public:
    // lifecycle / transport control (public Decoder contract)
    virtual bool Init(size_t max_queue_size = AV_MAX_QUEUE_SIZE
                    , size_t sleep_time = AV_SLEEP_TIME_AFTER_QUEUE_FULL) override;
    virtual bool Open(const std::string &filename) override;
    virtual void SetNoUse(bool setVideo = false, bool setAudio = false, bool setSubtitle = false) override;
    virtual void Close() override;
    virtual void Run() override;
    virtual void Stop() override;
    virtual void Pause() override;
    virtual void Continue() override;
    virtual void Seek(double pos) override;
    virtual double Duration() override;
    virtual void Wait() override;
    virtual void WaitDone() override;
    virtual bool IsDone() override;
public:
    // desired output formats / current stream formats
    virtual void setInfo(const VideoInfo *info) override;
    virtual void setInfo(const AudioInfo *info) override;
    virtual void setInfo(const SubtitleInfo *info) override;
    virtual std::shared_ptr<VideoInfo> getVideoInfo() override;
    virtual std::shared_ptr<AudioInfo> getAudioInfo() override;
    virtual std::shared_ptr<SubtitleInfo> getSebtitleInfo() override;
public:
    // decoded-data queue access
    virtual std::shared_ptr<VideoData> pop_video_data() override;
    virtual std::shared_ptr<AudioData> pop_audio_data() override;
    virtual std::shared_ptr<SubtitleData> pop_subtitle_data() override;
    virtual bool empty(eDataType edt) override;
    virtual size_t size(eDataType edt) override;
    virtual void clear(eDataType edt) override;
protected:
    void checkDone();
    void waitState();
    // swr/sws helpers: "NeedConvert" compares decoder output against the
    // requested info; "ResetConvert" (re)builds the cached context.
    bool swrNeedConvert();
    bool swrResetConvert();
    bool swsNeedConvert();
    bool swsResetConvert();
protected:
    void tdCreate();
    void tdMgr(const std::string &tdname
                , std::atomic<eState> *es
                , std::function<bool()> fun_run);
protected:
    bool openStream(AVMediaType tp);

    // if finished return true
    bool readFrame();
    // if finished return true
    bool decodeVideo();
    // if finished return true
    bool decodeAudio();
    // if finished return true
    bool decodeSubtitle();
protected:
    // requested output formats (null = deliver decoder-native format)
    std::shared_ptr<VideoInfoImpl> info_video_;
    std::shared_ptr<AudioInfoImpl> info_audio_;
    std::shared_ptr<SubtitleInfoImpl> info_subtitle_;

    // use for WaitDone();
    std::atomic<bool> wait_done_;
    // wait fun(as:runfun) finish in current loop ,
    // it's used to change estate.
    std::atomic<uint8_t> wait_state_;
    
    // per-thread lifecycle state (see eState)
    std::atomic<eState> es_pkg_;
    std::atomic<eState> es_video_;
    std::atomic<eState> es_audio_;
    std::atomic<eState> es_subtitle_;

    // worker threads: demux + one decoder per stream type
    Thread td_pkg_;
    Thread td_video_;
    Thread td_audio_;
    Thread td_subtitle_;

    // demuxed (still-compressed) packets, routed by stream index
    SafeQueue<std::shared_ptr<AVPacket>> queue_video_;
    SafeQueue<std::shared_ptr<AVPacket>> queue_audio_;
    SafeQueue<std::shared_ptr<AVPacket>> queue_subtitle_;

    // decoded (and possibly converted) output consumed by clients
    SafeQueue<std::shared_ptr<VideoData>>    queue_data_video_;
    SafeQueue<std::shared_ptr<AudioData>>    queue_data_audio_;
    SafeQueue<std::shared_ptr<SubtitleData>> queue_data_subtitle_;

    // backpressure: demuxer sleeps when packet queues reach this size
    size_t queue_max_size_;
    size_t sleep_time_after_queue_full_;

    // ffmpeg contexts, all managed through shared_ptr custom deleters
    std::shared_ptr<AVFormatContext> ctx_fmt_;
    std::shared_ptr<AVCodecContext> ctx_video_;
    std::shared_ptr<AVCodecContext> ctx_audio_;
    std::shared_ptr<AVCodecContext> ctx_subtitle_;

    // lazily-built converters (reset when setInfo() changes the target)
    std::shared_ptr<SwrContext> ctx_swr_;
    std::shared_ptr<SwsContext> ctx_sws_;

    // stream indices inside ctx_fmt_; -1 = not present / not opened
    int stream_video_;
    int stream_audio_;
    int stream_subtitle_;

    // last converted frame, flushed to the queue at end of stream
    std::shared_ptr<VideoData> data_video_last_;
};

//--------------------//
// Factory for the public interface: hands out the concrete implementation.
std::shared_ptr<Decoder> Decoder::NewInterface() {
    std::shared_ptr<Decoder> decoder = std::make_shared<DecoderImpl>();
    return decoder;
}
//--------------------//
// Construct with everything empty/closed: no file open, no streams,
// all thread states eState::null. Thread names are set up-front so log
// output from tdMgr is identifiable.
DecoderImpl::DecoderImpl()
        : info_video_(nullptr)
        , info_audio_(nullptr)
        , info_subtitle_(nullptr)
        , wait_done_(false)
        , wait_state_(0)
        , es_pkg_(eState::null)
        , es_video_(eState::null)
        , es_audio_(eState::null)
        , es_subtitle_(eState::null)
        , queue_max_size_(0)
        , sleep_time_after_queue_full_(0)
        , ctx_fmt_(nullptr)
        , ctx_video_(nullptr)
        , ctx_audio_(nullptr)
        , ctx_subtitle_(nullptr)
        , ctx_swr_(nullptr)
        , ctx_sws_(nullptr)
        , stream_video_(-1)   // -1 = stream not opened
        , stream_audio_(-1)
        , stream_subtitle_(-1)
        , data_video_last_(nullptr) {
    td_pkg_.SetName("ThreadPacket");
    td_video_.SetName("ThreadVideo");
    td_audio_.SetName("ThreadAudio");
    td_subtitle_.SetName("ThreadSubtitle");
}

// Make sure the file is closed and every worker thread has been told to
// stop and joined before the members are torn down.
DecoderImpl::~DecoderImpl() {
    Stop();
}

//--------------------//
// Set the desired video output format; the cached scaler is dropped so it
// gets rebuilt with the new parameters on next use.
void DecoderImpl::setInfo(const VideoInfo *info) {
    info_video_ = dc_ts_info(info);
    ctx_sws_.reset();
}

// Set the desired audio output format; the cached resampler is dropped so
// it gets rebuilt with the new parameters on next use.
void DecoderImpl::setInfo(const AudioInfo *info) {
    info_audio_ = dc_ts_info(info);
    ctx_swr_.reset();
}

// Set the desired subtitle output format (currently a placeholder --
// SubtitleInfoImpl carries no fields and no converter is cached).
void DecoderImpl::setInfo(const SubtitleInfo *info) {
    info_subtitle_ = dc_ts_info(info);
}

std::shared_ptr<VideoInfo> DecoderImpl::getVideoInfo() {
    if (!ctx_video_) return nullptr;
    VideoInfoImpl info;
    info.format = ctx_video_->pix_fmt;
    info.width = ctx_video_->width;
    info.height = ctx_video_->height;
    info.algorithm = SWS_BICUBIC;
    return dc_ts_info(&info);
}

std::shared_ptr<AudioInfo> DecoderImpl::getAudioInfo() {
    if (!ctx_audio_) return nullptr;
    AudioInfoImpl info;
    info.channels = ctx_audio_->channels;
    info.rate = ctx_audio_->sample_rate;
    info.format = ctx_audio_->sample_fmt;
    info.layout = ctx_audio_->channel_layout;
    info.samples = ctx_audio_->frame_size;
    return dc_ts_info(&info);
}

// Subtitle info is not implemented yet; always returns nullptr.
// NOTE(review): the name "Sebtitle" is a typo, but it matches the
// declaration in decoder.h and so cannot be changed here alone.
std::shared_ptr<SubtitleInfo> DecoderImpl::getSebtitleInfo() {
    return nullptr;
    //return dc_ts_info(info_subtitle_.get());
}

//--------------------//
// Take the oldest decoded video frame off the output queue.
// May return null when the queue is empty (the decode loop checks
// Pop() results for null the same way).
std::shared_ptr<VideoData> DecoderImpl::pop_video_data() {
    auto frame = queue_data_video_.Pop();
    return frame;
}

// Take the oldest decoded audio buffer off the output queue.
// May return null when the queue is empty.
std::shared_ptr<AudioData> DecoderImpl::pop_audio_data() {
    auto buffer = queue_data_audio_.Pop();
    return buffer;
}

// Take the oldest decoded subtitle entry off the output queue.
// May return null when the queue is empty.
std::shared_ptr<SubtitleData> DecoderImpl::pop_subtitle_data() {
    auto entry = queue_data_subtitle_.Pop();
    return entry;
}

// True when the selected output queue has no data.
// Unknown types report false, matching the original switch default.
bool DecoderImpl::empty(eDataType edt) {
    if (eDataType::video == edt) return queue_data_video_.Empty();
    if (eDataType::audio == edt) return queue_data_audio_.Empty();
    if (eDataType::subtitle == edt) return queue_data_subtitle_.Empty();
    return false;
}

// Number of entries waiting in the selected output queue.
// Unknown types report 0, matching the original switch default.
size_t DecoderImpl::size(eDataType edt) {
    if (eDataType::video == edt) return queue_data_video_.Count();
    if (eDataType::audio == edt) return queue_data_audio_.Count();
    if (eDataType::subtitle == edt) return queue_data_subtitle_.Count();
    return 0;
}

// Discard all entries in the selected output queue.
// Unknown types are ignored, matching the original switch default.
void DecoderImpl::clear(eDataType edt) {
    if (eDataType::video == edt) {
        queue_data_video_.Clear();
    } else if (eDataType::audio == edt) {
        queue_data_audio_.Clear();
    } else if (eDataType::subtitle == edt) {
        queue_data_subtitle_.Clear();
    }
}

//--------------------//
// One-time setup: store backpressure limits and register ffmpeg's codecs.
// max_queue_size: packet-queue depth at which the demux thread sleeps;
// sleep_time: how long it sleeps (units per `sleep_for`) when queues fill.
// NOTE(review): av_register_all() is deprecated (a no-op) in ffmpeg >= 4.0
// but still required by older versions this code targets.
bool DecoderImpl::Init(size_t max_queue_size /*= AV_MAX_QUEUE_SIZE*/
                    , size_t sleep_time /*= AV_SLEEP_TIME_AFTER_QUEUE_FULL*/) {
    queue_max_size_ = max_queue_size;
    sleep_time_after_queue_full_ = sleep_time;
    av_register_all();
    return true;
}

// Open a media file, probe its streams, open the available decoders and
// create the worker threads. On any failure everything is closed again
// and false is returned. Requires at least a video or an audio stream.
bool DecoderImpl::Open(const std::string &filename) {
    this->Close();

    AVFormatContext* ctx_fmt = nullptr;
    int result = avformat_open_input(&ctx_fmt, filename.c_str(), nullptr, nullptr);
    if (result < 0) {
        char errmsg[AV_ERROR_MAX_STRING_SIZE];
        av_log(nullptr, AV_LOG_ERROR, "Can't open file : %s, %s\n", filename.c_str()
                , av_make_error_string(errmsg, AV_ERROR_MAX_STRING_SIZE, result));
        goto error;
    }

    // Own the context; closing the input also frees it.
    ctx_fmt_ = std::shared_ptr<AVFormatContext>(
              ctx_fmt
            , [](AVFormatContext* ctx_fmt) {
                avformat_close_input(&ctx_fmt);
            });

    result = avformat_find_stream_info(ctx_fmt_.get(), nullptr);
    if (result < 0) {
        av_log(nullptr, AV_LOG_ERROR, "Can't get stream info : %s\n", filename.c_str());
        goto error;
    }

    // BUGFIX: pass the real url instead of nullptr; av_dump_format prints
    // the url with a %s conversion and a null pointer is undefined behavior.
    av_dump_format(ctx_fmt_.get(), 0, filename.c_str(), 0);

    // open decoder
    {
        // Streams that fail to open are marked unused rather than fatal...
        if (!this->openStream(AVMEDIA_TYPE_VIDEO))
            es_video_ = eState::nused;
        if (!this->openStream(AVMEDIA_TYPE_AUDIO))
            es_audio_ = eState::nused;

        // ...unless neither audio nor video is usable.
        if (eState::nused == es_video_
                && eState::nused == es_audio_) {
            goto error;
        }

        if (!this->openStream(AVMEDIA_TYPE_SUBTITLE))
            es_subtitle_ = eState::nused;
    }

    // create the demux/decode worker threads
    this->tdCreate();
    return true;
error:
    this->Close();
    return false;
}

void DecoderImpl::SetNoUse(bool setVideo, bool setAudio, bool setSubtitle) {
    if (setVideo) es_video_ = eState::nused;
    if (setAudio) es_audio_ = eState::nused;
    if (setSubtitle) es_subtitle_ = eState::nused;
}

// Release everything belonging to the currently-open file.
// Order matters: first flip states to null and wait until no worker is
// inside its run function (waitState), only then tear down the queues
// and ffmpeg contexts those workers were using.
void DecoderImpl::Close() {
    es_pkg_ = eState::null;
    es_video_ = eState::null;
    es_audio_ = eState::null;
    es_subtitle_ = eState::null;

    // wait until no thread still touches ctx_video_ etc.
    this->waitState();

    // drop undecoded packets...
    queue_video_.Clear();
    queue_audio_.Clear();
    queue_subtitle_.Clear();

    // ...and already-decoded output
    queue_data_video_.Clear();
    queue_data_audio_.Clear();
    queue_data_subtitle_.Clear();

    // release codec contexts before the format context that owns the streams
    ctx_video_ = nullptr;
    ctx_audio_ = nullptr;
    ctx_subtitle_ = nullptr;
    ctx_fmt_ = nullptr;

    stream_video_ = -1;
    stream_audio_ = -1;
    stream_subtitle_ = -1;

    wait_done_ = false;

    data_video_last_ = nullptr;
}

void DecoderImpl::Stop() {
    this->Close();
    es_pkg_ = eState::stop;
    es_video_ = eState::stop;
    es_audio_ = eState::stop;
    es_subtitle_ = eState::stop;
    this->Wait();
}

void DecoderImpl::Run() {
    if (eState::nused != es_pkg_)
        es_pkg_ = eState::run;
    if (eState::nused != es_video_)
        es_video_ = eState::run;
    if (eState::nused != es_audio_)
        es_audio_ = eState::run;
    if (eState::nused != es_subtitle_)
        es_subtitle_ = eState::run;
}

void DecoderImpl::Pause() {
    if (eState::nused != es_pkg_)
        es_pkg_ = eState::pause;
    if (eState::nused != es_video_)
        es_video_ = eState::pause;
    if (eState::nused != es_audio_)
        es_audio_ = eState::pause;
    if (eState::nused != es_subtitle_)
        es_subtitle_ = eState::pause;
    this->waitState();
}

void DecoderImpl::Continue() {
    if (eState::nused != es_pkg_)
        es_pkg_ = eState::run;
    if (eState::nused != es_video_)
        es_video_ = eState::run;
    if (eState::nused != es_audio_)
        es_audio_ = eState::run;
    if (eState::nused != es_subtitle_)
        es_subtitle_ = eState::run;
}

// Request a seek on every active worker thread.
// NOTE(review): the target position `pos` is ignored here -- it is not
// stored anywhere the worker threads could read, so they have no seek
// target. TODO: confirm against tdMgr / the per-thread seek handling
// (not visible in this chunk).
void DecoderImpl::Seek(double pos) {
    if (eState::nused != es_pkg_)
        es_pkg_ = eState::seek;
    if (eState::nused != es_video_)
        es_video_ = eState::seek;
    if (eState::nused != es_audio_)
        es_audio_ = eState::seek;
    if (eState::nused != es_subtitle_)
        es_subtitle_ = eState::seek;
}

double DecoderImpl::Duration() {
    return ctx_fmt_->streams[stream_video_]->duration * av_q2d(
            ctx_fmt_->streams[stream_video_]->time_base );
}

void DecoderImpl::Wait() {
    td_pkg_.Join();
    td_video_.Join();
    td_audio_.Join();
    td_subtitle_.Join();
}

void DecoderImpl::WaitDone() {
    wait_done_ = true;
    this->Wait();
}

bool DecoderImpl::IsDone() {
    auto isdone = [](eState es) -> bool {
        if (eState::null == es) return true;
        if (eState::nused == es) return true;
        if (eState::done == es) return true;
        return false;
    };

    if (!isdone(es_video_)) return false;
    if (!isdone(es_audio_)) return false;
    if (!isdone(es_subtitle_)) return false;
    if (!isdone(es_pkg_)) return false;

    return true;
}

//--------------------//
void DecoderImpl::checkDone() {
    if (!this->IsDone())
        return;

    // done
    if (wait_done_) {
        wait_done_ = false;
        
        // do not use this->Stop, 
        // maybe the process repeats in an endless
        es_pkg_ = eState::stop;
        es_video_ = eState::stop;
        es_audio_ = eState::stop;
        es_subtitle_ = eState::stop;
    }
}

// Busy-wait (10 ms polls) until no worker thread is inside its run
// function -- wait_state_ counts threads currently using ctx_video_ etc.,
// so the contexts are safe to release once it drops to zero.
void DecoderImpl::waitState() {
    // wait other thread not uesd ctx_video_ and so on.
    while (wait_state_) {
        sleep_for(10);
    }
}

// Does decoded audio need resampling? True when any decoder output
// parameter differs from the requested output (info_audio_); false when
// no output format was requested at all.
bool DecoderImpl::swrNeedConvert() {
    av_assert0(ctx_audio_);
    if (!info_audio_)
        return false;
    return ctx_audio_->channels != info_audio_->channels
        || ctx_audio_->sample_rate != info_audio_->rate
        || ctx_audio_->sample_fmt != info_audio_->format
        || ctx_audio_->channel_layout != info_audio_->layout;
}

// Lazily build the audio resampler: decoder output (ctx_audio_) ->
// requested output (info_audio_). Reuses a cached context when present
// (setInfo() resets the cache). Returns false on alloc/init failure,
// leaving ctx_swr_ unset so a later call can retry.
bool DecoderImpl::swrResetConvert() {
    if (ctx_swr_) return true;

    // local RAII wrapper so the context is freed on every early return
    auto ctx_swr = std::shared_ptr<SwrContext>(
            swr_alloc()
        , [](SwrContext* ctx) {
            swr_free(&ctx);
        }
    );
    if (!ctx_swr) {
        av_log(nullptr, AV_LOG_ERROR, "Can't alloc swrContext\n");
        return false;
    }

    // configure on the already-allocated context (out params first, then in)
    if ( nullptr == swr_alloc_set_opts(
            ctx_swr.get()
            , info_audio_->layout
            , info_audio_->format
            , info_audio_->rate
            , ctx_audio_->channel_layout
            , ctx_audio_->sample_fmt
            , ctx_audio_->sample_rate
            , 0, nullptr) ) {
        av_log(nullptr, AV_LOG_ERROR, "Can't swr_alloc_set_opts\n");
        return false;
    }

    if ( 0 != swr_init(ctx_swr.get()) ) {
        av_log(nullptr, AV_LOG_ERROR, "Can't swr_init\n");
        return false;
    }

    // success: publish the configured context
    ctx_swr_ = ctx_swr;

    return true;
}

// Does decoded video need scaling/pixel conversion? True when the decoder
// output differs from the requested output (info_video_) in format or
// dimensions; false when no output format was requested at all.
bool DecoderImpl::swsNeedConvert() {
    av_assert0(ctx_video_);
    if (!info_video_)
        return false;
    return ctx_video_->pix_fmt != info_video_->format
        || ctx_video_->width != info_video_->width
        || ctx_video_->height != info_video_->height;
}

// Lazily build the video scaler: decoder output (ctx_video_) -> requested
// output (info_video_). Reuses a cached context (setInfo() resets it).
// NOTE(review): on sws_getContext failure ctx_sws_ holds a shared_ptr
// whose stored pointer is null, so both the check below and the
// `if (ctx_sws_)` fast path still behave correctly, and the deleter's
// sws_freeContext(nullptr) is a no-op.
bool DecoderImpl::swsResetConvert() {
    if (ctx_sws_) return true;

    ctx_sws_ = std::shared_ptr<SwsContext>(
            sws_getContext(
                  ctx_video_->width       // source geometry/format
                , ctx_video_->height
                , ctx_video_->pix_fmt
                , info_video_->width      // requested geometry/format
                , info_video_->height
                , info_video_->format
                , info_video_->algorithm  // SWS_* scaling flag
                , nullptr, nullptr, nullptr)
            , [](SwsContext* ctx) {
                sws_freeContext(ctx);
            });
    if (!ctx_sws_) {
        av_log(nullptr, AV_LOG_ERROR, "Can't alloc SwsContext\n");
        return false;
    }

    return true;
}

//--------------------//
// Locate the best stream of media type `tp`, allocate and open a decoder
// for it, and store the stream index and codec context in the matching
// members. Returns false on any failure, leaving the context null.
bool DecoderImpl::openStream(AVMediaType tp) {
    int *stream = nullptr;
    const char *logstr = nullptr;
    std::shared_ptr<AVCodecContext> *ctx = nullptr;

    // select the member slots for this media type
    switch (tp) {
        case AVMEDIA_TYPE_VIDEO: {
            stream = &stream_video_;
            ctx = &ctx_video_;
            logstr = "video";
        } break;
        case AVMEDIA_TYPE_AUDIO: {
            stream = &stream_audio_;
            ctx = &ctx_audio_;
            logstr = "audio";
        } break;
        case AVMEDIA_TYPE_SUBTITLE: {
            stream = &stream_subtitle_;
            ctx = &ctx_subtitle_;
            logstr = "subtitle";
        } break;
        default : break;
    }

    if (!stream || !logstr || !ctx) {
        // BUGFIX: the original passed a stray (always-null) extra argument
        // for a format string with no conversion specifier, and the two
        // concatenated literals had no separator between them.
        av_log(nullptr, AV_LOG_ERROR
                , "openStream args must be one of AVMEDIA_TYPE_VIDEO, "
                  "AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE.\n");
        return false;
    }

    *stream = av_find_best_stream(ctx_fmt_.get(), tp, -1, -1, nullptr, 0);
    if (*stream < 0) {
      av_log(nullptr, AV_LOG_ERROR, "Can't find %s stream in input file\n", logstr);
      return false;
    }

    AVCodecParameters *origin_par = ctx_fmt_->streams[*stream]->codecpar;

    AVCodec *codec = avcodec_find_decoder(origin_par->codec_id);
    if (!codec) {
        av_log(nullptr, AV_LOG_ERROR, "Can't find %s decoder\n", logstr);
        return false;
    }

    *ctx = std::shared_ptr<AVCodecContext>(
              avcodec_alloc_context3(codec)
            , [](AVCodecContext* ctx) {
                // BUGFIX: guard against a null context -- the deleter also
                // runs when avcodec_alloc_context3() failed. The deprecated
                // avcodec_close() call was dropped: avcodec_free_context()
                // already closes the codec before freeing.
                if (ctx) avcodec_free_context(&ctx);
            });

    if (! (*ctx) ) {
        av_log(nullptr, AV_LOG_ERROR, "Can't allocate %s decoder context\n", logstr);
        return false;
    }

    int result = avcodec_parameters_to_context( (*ctx).get(), origin_par);
    if (result) {
        av_log(nullptr, AV_LOG_ERROR, "Can't copy %s decoder context\n", logstr);
         (*ctx) = nullptr;
        return false;
    }

    result = avcodec_open2( (*ctx).get(), codec, nullptr);
    if (result < 0) {
        av_log( (*ctx).get(), AV_LOG_ERROR, "Can't open %s decoder\n", logstr);
        (*ctx) = nullptr;
        return false;
    }

    return true;
}

// Demux one packet from the container and route it to the queue of the
// stream it belongs to. Returns true when the input is exhausted (or read
// fails), false to keep demuxing. When every in-use queue is full (or its
// stream is inactive) the thread sleeps briefly for backpressure.
bool DecoderImpl::readFrame() {
    av_log(nullptr, AV_LOG_INFO, ".");

    // packet owned by shared_ptr so whichever queue takes it frees it
    auto pkt = std::shared_ptr<AVPacket>(
              av_packet_alloc()
            , [](AVPacket* pkt) {
                av_packet_free(&pkt);
            } );
    if (!pkt) {
        av_log(nullptr, AV_LOG_ERROR, "Can't allocate packet\n");
        return false;
    }

    // av_read_frame: 0 on OK, < 0 on error or end of file.
    // NOTE(review): read errors and EOF are treated the same -- demuxing
    // simply reports "finished".
    if (av_read_frame(ctx_fmt_.get(), pkt.get()) < 0)
        return true;

    // route by stream index; packets for unused streams are dropped
    if (pkt->stream_index == stream_video_
            && es_video_ != eState::nused) {
        queue_video_.Push(pkt);
    } else if (pkt->stream_index == stream_audio_
            && es_audio_ != eState::nused) {
        queue_audio_.Push(pkt);
    } else if (pkt->stream_index == stream_subtitle_
            && es_subtitle_ != eState::nused) {
        queue_subtitle_.Push(pkt);
    }

    // a stream needs no more input when it is absent, not running, or its
    // packet queue has reached the configured limit
    auto needWait = []( 
              int stream
            , eState state
            , size_t queue_size
            , size_t queue_max_size) -> bool {
        if (stream < 0) return true;
        if (state != eState::run) return true;
        return queue_size >= queue_max_size;
    };

    // only sleep when NO stream currently wants more packets
    if ( needWait(stream_video_, es_video_, queue_video_.Count(), queue_max_size_)
            && needWait(stream_audio_, es_audio_, queue_audio_.Count(), queue_max_size_)
            && needWait(stream_subtitle_, es_subtitle_, queue_subtitle_.Count(), queue_max_size_) ) {
        sleep_for(sleep_time_after_queue_full_);
    }

    return false;
}

/*
*   Decode one video packet into a VideoData and stage it for the
*   output queue. Returns true when the stream is fully drained
*   (demux thread done and queue empty), false otherwise.
*
*   The most recent frame is held back in data_video_last_ so its
*   display delay can be computed from the NEXT frame's timestamp;
*   it is flushed when decoding finishes.
*/
bool DecoderImpl::decodeVideo() {
    auto pkt = queue_video_.Pop();
    if (!pkt) {
        if (eState::done == es_pkg_) {
            /*
            *   push the last data into queue.
            */
            if (data_video_last_) {
                queue_data_video_.Push(data_video_last_);
                data_video_last_ = nullptr;
            }
            return true;
        }
        sleep_for(10);
        return false;
    }

    /*
    * decode
    */
    int ret = avcodec_send_packet(ctx_video_.get(), pkt.get());
    if (ret < 0) {
        // EAGAIN/EOF are part of the normal send/receive protocol.
        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            av_log(nullptr, AV_LOG_ERROR, "video error avcodec_send_packet\n");
        return false;
    }

    auto fr = std::shared_ptr<AVFrame>(
              av_frame_alloc()
            , [](AVFrame* fr) {
                av_frame_free(&fr);
            } );
    if (!fr) {
        av_log(nullptr, AV_LOG_ERROR, "Can't allocate video frame\n");
        return false;
    }

    ret = avcodec_receive_frame(ctx_video_.get(), fr.get());
    if (ret < 0) {
        // EAGAIN means the decoder needs more input — not an error.
        if (ret != AVERROR_EOF && ret != AVERROR(EAGAIN))
            av_log(nullptr, AV_LOG_ERROR, "video error avcodec_receive_frame\n");
        return false;
    }

    /*
    *   convert (rescale to the requested output pixel format when the
    *   decoder output differs from it)
    */
    std::shared_ptr<VideoData> data = nullptr;

    if (this->swsNeedConvert()) {
        // need convert
        if (!this->swsResetConvert())
            return false;

        auto info = dc_ts_info(info_video_.get());
        // NOTE(review): the deleter av_freep's data[0] unconditionally;
        // assumes VideoData zero-initializes its data[] pointers so an
        // early failure frees nullptr (safe) — confirm in the ctor.
        data = std::shared_ptr<VideoData>(
              new VideoData(info.get())
            , [](VideoData * dat){
                av_freep(&dat->data[0]);
                delete dat;
            });
        if (!data) {
            av_log(nullptr, AV_LOG_ERROR, "Can't allocate video VideoData\n");
            return false;
        }

        ret = av_image_alloc(
              data->data
            , data->linesize
            , data->width
            , data->height
            , info_video_->format
            , 16);
        if (ret < 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "Can't allocate video av_image_alloc\n");
            // was `return nullptr;` — ill-formed for a bool return
            return false;
        }

        ret = sws_scale( ctx_sws_.get()
                , fr->data
                , fr->linesize
                , 0
                , fr->height
                , data->data
                , data->linesize);
        if (ret < 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "error video sws_scale\n");
            return false;
        }
    } else {
        // don't need convert: copy decoder output verbatim
        auto info = this->getVideoInfo();
        data = std::shared_ptr<VideoData>(
              new VideoData(info.get())
            , [](VideoData * dat){
                av_freep(&dat->data[0]);
                delete dat;
            });
        if (!data) {
            av_log(nullptr, AV_LOG_ERROR, "Can't allocate video VideoData\n");
            return false;
        }

        ret = av_image_alloc(
              data->data
            , data->linesize
            , data->width
            , data->height
            , ctx_video_->pix_fmt
            , 16);
        if (ret < 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "Can't allocate video av_image_alloc\n");
            // was `return nullptr;` — ill-formed for a bool return
            return false;
        }

        av_image_copy( data->data
                , data->linesize
                , (const uint8_t **)fr->data
                , fr->linesize
                , ctx_video_->pix_fmt
                , data->width
                , data->height);
    }

    if (data) {
        data->pts = fr->pts;
        if (data->pts == AV_NOPTS_VALUE) {
            // synthesize a monotonically increasing pts when missing
            if (data_video_last_)
                data->pts = data_video_last_->pts + 1;
            else
                data->pts = 0;
        }
        data->time = data->pts * av_q2d(
            ctx_fmt_->streams[stream_video_]->time_base );
        // init delay as AV_DELAY_TIME_LAST_FRAME millisecond.
        data->delay = AV_DELAY_TIME_LAST_FRAME;

        /*
        *   set last data delay time
        *   delay = nowtime - lasttime.
        */
        if (data_video_last_) {
            double delay = data->time - data_video_last_->time;
            if (delay > 0)
                data_video_last_->delay = (uint32_t)(delay * 1000);
            else
                av_log(nullptr, AV_LOG_ERROR, "error video set delay time: delay <= 0, keep default.\n");
        }
    } else {
        av_log(nullptr, AV_LOG_ERROR, "video error no data.\n");
        return false;
    }

    /*
    *   if had last data, push last data.
    *   set data as last data.
    *   while done, do not forget push the last data into queue.
    *   you must reset it when close.
    *   push last data and not push data cause you can calculate
    *       the delay time.
    */
    if (data_video_last_) {
        // push data to queue.
        if (queue_data_video_.Count() >= queue_max_size_) {
            sleep_for(sleep_time_after_queue_full_);
        }
        queue_data_video_.Push(data_video_last_);
    }
    data_video_last_ = data;

    av_log(nullptr, AV_LOG_INFO, "+");

    return false;
}

/*
*   Decode one audio packet into an AudioData and push it to the
*   output queue. Returns true when the stream is fully drained
*   (demux thread done and queue empty), false otherwise.
*/
bool DecoderImpl::decodeAudio() {
    auto pkt = queue_audio_.Pop();
    if (!pkt) {
        if (eState::done == es_pkg_)
            return true;
        sleep_for(10);
        return false;
    }

    /*
    * decode
    */
    int ret = avcodec_send_packet(ctx_audio_.get(), pkt.get());
    if (ret < 0) {
        // EAGAIN/EOF are part of the normal send/receive protocol.
        if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            av_log(nullptr, AV_LOG_ERROR, "audio error avcodec_send_packet\n");
        return false;
    }

    auto fr = std::shared_ptr<AVFrame>(
              av_frame_alloc()
            , [](AVFrame* fr) {
                av_frame_free(&fr);
            } );
    if (!fr) {
        av_log(nullptr, AV_LOG_ERROR, "Can't allocate audio frame\n");
        return false;
    }

    ret = avcodec_receive_frame(ctx_audio_.get(), fr.get());
    if (ret < 0) {
        // EAGAIN means the decoder needs more input — not an error.
        if (ret != AVERROR_EOF && ret != AVERROR(EAGAIN))
            av_log(nullptr, AV_LOG_ERROR, "audio error avcodec_receive_frame\n");
        return false;
    }

    /*
    *   convert (resample to the requested output layout/format/rate
    *   when the decoder output differs from it)
    */
    std::shared_ptr<AudioData> data = nullptr;
    if (this->swrNeedConvert()) {
        // need convert
        if (!this->swrResetConvert())
            return false;
        // convert, now the ctx_swr_ and info_audio_ must exist.
        auto info = dc_ts_info(info_audio_.get());
        data = std::shared_ptr<AudioData>(
                new AudioData(info.get())
            , [](AudioData * dat){
                av_freep(&dat->data);
                delete dat;
            });
        if (!data) {
            av_log(nullptr, AV_LOG_ERROR, "Can't allocate audio AudioData\n");
            return false;
        }

        // upper bound for output samples, accounting for samples
        // buffered inside the resampler (swr_get_delay).
        data->samples = av_rescale_rnd(
                  swr_get_delay(ctx_swr_.get(), ctx_audio_->sample_rate)
                        + fr->nb_samples
                , data->rate
                , ctx_audio_->sample_rate
                , AV_ROUND_UP);
        if (data->samples < 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "error audio av_rescale_rnd\n");
            return false;
        }

        // alloc samples data.
        ret = av_samples_alloc(
                  &data->data
                , &data->linesize
                , data->channels
                , data->samples
                , info_audio_->format, 1);
        if (ret < 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "Can't allocate audio av_samples_alloc\n");
            return false;
        }

        // convert; swr_convert returns the actual output sample count.
        data->samples = swr_convert(ctx_swr_.get()
                , &data->data, data->samples
                , (const uint8_t **)fr->data
                , fr->nb_samples);
        if (data->samples < 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "error audio swr_convert\n");
            return false;
        }

        data->size = av_samples_get_buffer_size(
                  &data->linesize
                , data->channels
                , data->samples
                , info_audio_->format
                , 1);
        if (data->size <= 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "error audio av_samples_get_buffer_size\n");
            return false;
        }
    } else {
        // don't need convert: copy decoder output layout verbatim

        auto info = this->getAudioInfo();
        data = std::shared_ptr<AudioData>(
                new AudioData(info.get())
            , [](AudioData * dat){
                av_freep(&dat->data);
                delete dat;
            });
        if (!data) {
            av_log(nullptr, AV_LOG_ERROR, "Can't allocate audio AudioData\n");
            return false;
        }

        // alloc samples data.
        ret = av_samples_alloc(
                  &data->data
                , &data->linesize
                , fr->channels
                , fr->nb_samples
                , (AVSampleFormat)fr->format, 0);
        if (ret < 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "Can't allocate audio av_samples_alloc\n");
            return false;
        }

        data->samples = fr->nb_samples;
        data->size = av_samples_get_buffer_size(
                  nullptr
                , fr->channels
                , fr->nb_samples
                , (AVSampleFormat)fr->format
                , 0);
        // validate the size like the convert branch does
        if (data->size <= 0) {
            data = nullptr;
            av_log(nullptr, AV_LOG_ERROR, "error audio av_samples_get_buffer_size\n");
            return false;
        }
    }

    if (data) {
        data->pts = fr->pts;
        data->time = data->pts * av_q2d(
            ctx_fmt_->streams[stream_audio_]->time_base);
    } else {
        av_log(nullptr, AV_LOG_ERROR, "audio error no data.\n");
        return false;
    }

    // push data to queue.
    if (queue_data_audio_.Count() >= queue_max_size_) {
        sleep_for(sleep_time_after_queue_full_);
    }
    queue_data_audio_.Push(data);

    av_log(nullptr, AV_LOG_INFO, "-");

    return false;
}

/*
*   Drain one subtitle packet. Subtitle decoding is not implemented:
*   packets are popped and discarded. Returns true only when the
*   demux thread is done and the queue is empty.
*/
bool DecoderImpl::decodeSubtitle() {
    auto pkt = queue_subtitle_.Pop();
    if (!pkt) {
        // Empty queue: report done once demuxing has finished,
        // otherwise wait briefly for more packets.
        if (es_pkg_ == eState::done)
            return true;
        sleep_for(10);
        return false;
    }

    av_log(nullptr, AV_LOG_INFO, "*");

    return false;
}

//--------------------//
void DecoderImpl::tdCreate() {
    auto fun = std::bind(
              &DecoderImpl::tdMgr
            , this
            , std::placeholders::_1
            , std::placeholders::_2
            , std::placeholders::_3 );

    td_pkg_.Create(
              fun
            , td_pkg_.Name()
            , &es_pkg_
            , std::bind(
                  &DecoderImpl::readFrame
                , this) );
    if (stream_video_ >= 0) {
        td_video_.Create(
              fun
            , td_video_.Name()
            , &es_video_
            , std::bind(
                  &DecoderImpl::decodeVideo
                , this) );
    }

    if (stream_audio_ >= 0) {
        td_audio_.Create(
              fun
            , td_audio_.Name()
            , &es_audio_
            , std::bind(
                  &DecoderImpl::decodeAudio
                , this) );
    }

    if (stream_subtitle_ >= 0) {
        td_subtitle_.Create(
              fun
            , td_subtitle_.Name()
            , &es_subtitle_
            , std::bind(
                  &DecoderImpl::decodeSubtitle
                , this) );
    }
    
}

/*
*   Worker-thread main loop shared by the demuxer and all decoders.
*
*   @param tdname   thread name, used only for logging.
*   @param es       this thread's state flag, driven externally.
*   @param fun_run  the work step; returning true marks the thread done.
*/
void DecoderImpl::tdMgr(
          const std::string &tdname
        , std::atomic<eState>* es
        , std::function<bool()> fun_run) {

    av_log(nullptr, AV_LOG_INFO
            , "%s thread is running.\n"
            , tdname.c_str());

    bool quit = false;
    while (!quit) {
        switch (*es) {
            // idle states: wait 40 ms
            case eState::null:
            case eState::nused:
            case eState::done: {
                sleep_for(40);
            } break;
            case eState::run: {
                ++wait_state_;
                if (fun_run()) {
                    *es = eState::done;
                    this->checkDone();
                }
                --wait_state_;
            } break;
            // these states are handled elsewhere; sleep briefly so we
            // don't busy-spin a core while waiting for a state change
            case eState::pause:
            case eState::seek:
            case eState::cont: {
                sleep_for(10);
            } break;
            // quit
            case eState::stop: quit = true; break;
            default : quit = true; break;
        }
    }

    av_log(nullptr, AV_LOG_INFO
            , "%s thread is stopped.\n"
            , tdname.c_str());
}
//--------------------//


} // end namespace av
} // end namespace dzlua