#include "hal_stream_convert.h"
#include "base_log.h"
#include <chrono>

namespace El {
namespace Hal {

/// Construct the converter with all FFmpeg resources owned by RAII deleters,
/// so partially-initialized state is always cleaned up correctly.
StreamConverter::StreamConverter()
    : format_ctx(nullptr,
                 [](AVFormatContext *ctx) {
                     // avformat_close_input() closes the input AND frees the
                     // context (nulling the pointer), so a follow-up
                     // avformat_free_context() call is redundant — removed.
                     avformat_close_input(&ctx);
                 }),
      video_codec_ctx(nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); }),
      video_packet(nullptr, [](AVPacket *pkt) { av_packet_free(&pkt); }),
      video_bsf_ctx(nullptr, [](AVBSFContext *ctx) { av_bsf_free(&ctx); }),
      audio_codec_ctx(nullptr, [](AVCodecContext *ctx) { avcodec_free_context(&ctx); }),
      audio_bsf_ctx(nullptr, [](AVBSFContext *ctx) { av_bsf_free(&ctx); })
{
}

/// Stop the worker thread before tearing down the FFmpeg contexts it uses.
/// Note: Close() also calls Stop() first, so the explicit Stop() here is a
/// harmless belt-and-braces call that makes the shutdown order obvious.
StreamConverter::~StreamConverter()
{
    Stop();
    Close();
}

/// Open the input file and set up decoder contexts and bitstream filters for
/// the first video and first audio stream found.
///
/// Video: H.264/HEVC only; an mp4toannexb bitstream filter is configured so
/// downstream consumers receive Annex-B framed packets.
/// Audio: AAC (aac_adtstoasc), MP3 ("mp3decomp" — NOTE(review): verify this
/// bsf name exists in the linked FFmpeg build), or G.711 A/μ-law (no filter).
///
/// @param inputFile path/URL understood by avformat_open_input.
/// @return true on success. On failure, partially-initialized contexts are
///         left set; callers are expected to Close() before retrying.
bool StreamConverter::Open(const std::string &inputFile)
{
    input_file = inputFile;

    // Open input file
    if (avformat_open_input(&format_ctx_raw, input_file.c_str(), nullptr, nullptr) != 0) {
        LOG_ERROR("Unable to open input file.");
        return false;
    }

    // Hand ownership to the RAII wrapper immediately so error paths below
    // cannot leak the context.
    format_ctx.reset(format_ctx_raw);

    if (avformat_find_stream_info(format_ctx.get(), nullptr) < 0) {
        LOG_ERROR("Unable to find stream information.");
        return false;
    }

    // Find video stream (first one wins)
    video_stream_index = -1;
    for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
        const AVCodecParameters *codecpar = format_ctx->streams[i]->codecpar;
        if (codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            video_stream_index = i;
            video_codec = avcodec_find_decoder(codecpar->codec_id);
            break;
        }
    }

    if (video_stream_index != -1) {
        // Create video codec context
        video_codec_ctx.reset(avcodec_alloc_context3(video_codec));

        if (!video_codec_ctx) {
            LOG_ERROR("Unable to allocate video codec context.");
            return false;
        }

        if (avcodec_parameters_to_context(video_codec_ctx.get(), format_ctx->streams[video_stream_index]->codecpar) < 0) {
            LOG_ERROR("Unable to copy video codec parameters to context.");
            return false;
        }

        if (avcodec_open2(video_codec_ctx.get(), video_codec, nullptr) < 0) {
            LOG_ERROR("Unable to open video codec.");
            return false;
        }

        video_packet.reset(av_packet_alloc());

        // Initialize video bitstream filter: convert MP4 length-prefixed NALs
        // to Annex-B start-code framing.
        const char *bsf_name = nullptr;
        if (video_codec_ctx->codec_id == AV_CODEC_ID_H264) {
            bsf_name = "h264_mp4toannexb";
        } else if (video_codec_ctx->codec_id == AV_CODEC_ID_HEVC) {
            bsf_name = "hevc_mp4toannexb";
        } else {
            LOG_ERROR("Unsupported video codec.");
            return false;
        }

        video_bsf = av_bsf_get_by_name(bsf_name);
        if (!video_bsf) {
            LOG_ERROR("Cannot find video bsf: {}", bsf_name);
            return false;
        } else {
            LOG_INFO("Found video bsf: {}", bsf_name);
        }

        if (av_bsf_alloc(video_bsf, &video_bsf_ctx_raw) < 0) {
            LOG_ERROR("Unable to allocate video bitstream filter context.");
            return false;
        }

        video_bsf_ctx.reset(video_bsf_ctx_raw);

        // par_in and time_base_in must be set BEFORE av_bsf_init().
        if (avcodec_parameters_copy(video_bsf_ctx->par_in, format_ctx->streams[video_stream_index]->codecpar) < 0) {
            LOG_ERROR("Unable to copy video codec parameters to bitstream filter context.");
            return false;
        }

        video_bsf_ctx->time_base_in = format_ctx->streams[video_stream_index]->time_base;

        if (av_bsf_init(video_bsf_ctx.get()) < 0) {
            LOG_ERROR("Unable to initialize video bitstream filter context.");
            return false;
        }
    } else {
        LOG_WARN("No video stream found.");
    }

    // Find audio stream (first one wins)
    audio_stream_index = -1;
    for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
        const AVCodecParameters *codecpar = format_ctx->streams[i]->codecpar;
        if (codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_index = i;
            audio_codec = avcodec_find_decoder(codecpar->codec_id);
            break;
        }
    }

    if (audio_stream_index != -1) {
        // Create audio codec context
        audio_codec_ctx.reset(avcodec_alloc_context3(audio_codec));

        if (!audio_codec_ctx) {
            LOG_ERROR("Unable to allocate audio codec context.");
            return false;
        }

        if (avcodec_parameters_to_context(audio_codec_ctx.get(), format_ctx->streams[audio_stream_index]->codecpar) <
            0) {
            LOG_ERROR("Unable to copy audio codec parameters to context.");
            return false;
        }

        if (avcodec_open2(audio_codec_ctx.get(), audio_codec, nullptr) < 0) {
            LOG_ERROR("Unable to open audio codec.");
            return false;
        }

        // Initialize audio bitstream filter
        const char *audio_bsf_name = nullptr;
        if (audio_codec_ctx->codec_id == AV_CODEC_ID_AAC) {
            audio_bsf_name = "aac_adtstoasc";
        } else if (audio_codec_ctx->codec_id == AV_CODEC_ID_MP3) {
            audio_bsf_name = "mp3decomp";
        } else if (audio_codec_ctx->codec_id == AV_CODEC_ID_PCM_ALAW ||
                   audio_codec_ctx->codec_id == AV_CODEC_ID_PCM_MULAW) {
            // G.711 A-law or μ-law encoding
            // Usually no bitstream filter is needed, but if there are special requirements, it can be added here
            audio_bsf_name = nullptr; // No filter needed
        } else {
            LOG_ERROR("Unsupported audio codec.");
            return false;
        }

        if (audio_bsf_name) {
            audio_bsf = av_bsf_get_by_name(audio_bsf_name);
            if (!audio_bsf) {
                LOG_ERROR("Unable to find audio bitstream filter: {}", audio_bsf_name);
                return false;
            } else {
                LOG_INFO("Found audio bitstream filter: {}", audio_bsf_name);
            }

            if (av_bsf_alloc(audio_bsf, &audio_bsf_ctx_raw) < 0) {
                LOG_ERROR("Unable to allocate audio bitstream filter context.");
                return false;
            }

            audio_bsf_ctx.reset(audio_bsf_ctx_raw);

            // Same ordering constraint as video: parameters before init.
            if (avcodec_parameters_copy(audio_bsf_ctx->par_in, format_ctx->streams[audio_stream_index]->codecpar) < 0) {
                LOG_ERROR("Unable to copy audio codec parameters to bitstream filter context.");
                return false;
            }

            audio_bsf_ctx->time_base_in = format_ctx->streams[audio_stream_index]->time_base;

            if (av_bsf_init(audio_bsf_ctx.get()) < 0) {
                LOG_ERROR("Unable to initialize audio bitstream filter context.");
                return false;
            }
        }
    } else {
        LOG_WARN("No audio stream found.");
    }

    return true;
}

/// Register the callback invoked for every outgoing video packet.
///
/// The callback runs on the processing thread while callback_mutex is held;
/// the packet's buffer pointer is only valid for the duration of the call.
///
/// @param videoCallback new handler (may be empty to clear).
/// @return always true.
bool StreamConverter::RegisterVideoCallback(std::function<void(const VideoEncPacket &)> videoCallback)
{
    std::lock_guard<std::mutex> lock(callback_mutex);
    // Move instead of copy: std::function copies can allocate.
    video_callback = std::move(videoCallback);
    return true;
}

/// Register the callback invoked for every outgoing audio packet.
///
/// The callback runs on the processing thread while callback_mutex is held;
/// the packet's buffer pointer is only valid for the duration of the call.
///
/// @param audioCallback new handler (may be empty to clear).
/// @return always true.
bool StreamConverter::RegisterAudioCallback(std::function<void(const AudioEncPacket &)> audioCallback)
{
    std::lock_guard<std::mutex> lock(callback_mutex);
    // Move instead of copy: std::function copies can allocate.
    audio_callback = std::move(audioCallback);
    return true;
}

bool StreamConverter::Start(bool loop)
{
    if (is_running.load()) {
        LOG_ERROR("Conversion is already running.");
        return false;
    }

    is_running.store(true);
    should_loop.store(loop);  // Store loop flag
    processing_thread = std::thread(&StreamConverter::ProcessingLoop, this);
    return true;
}

/// Signal the processing thread to stop and join it.
///
/// Bug fix: the previous early-return on !is_running skipped the join when
/// the loop had already stopped itself (e.g. after a read error sets
/// is_running=false), leaving a joinable std::thread whose destruction calls
/// std::terminate. Always attempt the join; joinable() makes it idempotent.
void StreamConverter::Stop()
{
    is_running.store(false);
    if (processing_thread.joinable()) {
        processing_thread.join();
    }
}

/// Worker-thread body: demux packets and dispatch them to the per-stream
/// processors, optionally looping the file forever with monotonic PTS.
void StreamConverter::ProcessingLoop()
{
    AVPacket *pkt = av_packet_alloc();
    if (!pkt) {
        LOG_ERROR("Unable to allocate AVPacket.");
        return;
    }

    // Accumulated offsets (in each stream's time base) added to packet
    // timestamps so PTS keeps increasing across loop restarts.
    uint64_t total_video_pts = 0;
    uint64_t total_audio_pts = 0;

    do {  // Use do-while loop to support repeated playback
        // Reset file position to the beginning
        if (av_seek_frame(format_ctx.get(), -1, 0, AVSEEK_FLAG_BACKWARD) < 0) {
            LOG_ERROR("Unable to reset file position to the beginning.");
            break;
        }

        // Reset first frame flags so pacing re-anchors to the wall clock
        is_first_video_frame = true;
        is_first_audio_frame = true;

        while (is_running.load()) {
            int ret = av_read_frame(format_ctx.get(), pkt);
            if (ret < 0) {
                // End of file or error occurred
                if (ret == AVERROR_EOF) {
                    break;  // Exit inner loop, will restart if looping is enabled
                } else {
                    LOG_ERROR("Unable to read AVPacket. Error code: {}", ret);
                    is_running.store(false);
                    break;
                }
            }

            if (pkt->stream_index == video_stream_index && video_stream_index != -1) {
                ProcessVideoPacket(pkt, total_video_pts);
            } else if (pkt->stream_index == audio_stream_index && audio_stream_index != -1) {
                ProcessAudioPacket(pkt, total_audio_pts);
            }

            av_packet_unref(pkt);
        }

        if (should_loop.load() && is_running.load()) {
            // Update total PTS to maintain continuity across the restart.
            // Bug fix: guard each index — a video-only or audio-only file
            // previously dereferenced streams[-1] here and crashed.
            if (video_stream_index != -1) {
                AVRational video_time_base = format_ctx->streams[video_stream_index]->time_base;
                total_video_pts += av_rescale_q(format_ctx->duration, AV_TIME_BASE_Q, video_time_base);
            }
            if (audio_stream_index != -1) {
                AVRational audio_time_base = format_ctx->streams[audio_stream_index]->time_base;
                total_audio_pts += av_rescale_q(format_ctx->duration, AV_TIME_BASE_Q, audio_time_base);
            }
        }
    } while (should_loop.load() && is_running.load());  // Decide whether to continue playing based on loop flag

    av_packet_free(&pkt);
}

/// Run one demuxed video packet through the Annex-B bitstream filter, pace it
/// against the wall clock, and deliver each filtered packet to the registered
/// video callback.
///
/// @param pkt       demuxed packet; ownership of its payload passes to the
///                  bsf on send, and pkt is reused for each filtered output.
/// @param total_pts offset (in the stream time base) added to timestamps so
///                  PTS stays monotonic across file loops.
void StreamConverter::ProcessVideoPacket(AVPacket *pkt, uint64_t &total_pts)
{
    AVRational time_base = format_ctx->streams[video_stream_index]->time_base;

    if (av_bsf_send_packet(video_bsf_ctx.get(), pkt) < 0) {
        LOG_ERROR("Error sending video packet to bitstream filter.");
        return;
    }

    // One send may yield zero or more filtered packets.
    while (av_bsf_receive_packet(video_bsf_ctx.get(), pkt) == 0) {
        // Shift into the looped timeline.
        // NOTE(review): assumes pts/dts are valid — packets with
        // AV_NOPTS_VALUE would wrap here; confirm inputs always carry PTS.
        pkt->pts += total_pts;
        pkt->dts += total_pts;

        int64_t pts_time = av_rescale_q(pkt->pts, time_base, AV_TIME_BASE_Q);
        LOG_DEBUG("Video frame PTS time: {} microseconds", pts_time);

        // Anchor the wall-clock schedule to the first frame of each loop pass.
        if (is_first_video_frame) {
            video_start_time = std::chrono::steady_clock::now() - std::chrono::microseconds(pts_time);
            is_first_video_frame = false;
        }

        // Real-time pacing: sleep until this frame's presentation time.
        auto frame_time = video_start_time + std::chrono::microseconds(pts_time);
        auto now = std::chrono::steady_clock::now();
        if (frame_time > now) {
            std::this_thread::sleep_until(frame_time);
        }

        VideoEncPacket frame;

        // Points into pkt's buffer — valid only until av_packet_unref below,
        // i.e. only for the duration of the callback.
        frame.buf = (uint8_t *)pkt->data;
        frame.len = (uint32_t)pkt->size;
        frame.frameType = (pkt->flags & AV_PKT_FLAG_KEY) ? MEDIA_FRAME_I : MEDIA_FRAME_P;
        frame.codecType = (video_codec_ctx->codec_id == AV_CODEC_ID_H264)   ? MEDIA_VIDEO_CODEC_H264
                          : (video_codec_ctx->codec_id == AV_CODEC_ID_HEVC) ? MEDIA_VIDEO_CODEC_H265
                                                                            : MEDIA_VIDEO_CODEC_UNKOWN;

        AVRational frame_rate = format_ctx->streams[video_stream_index]->avg_frame_rate;
        frame.fps = av_q2d(frame_rate);
        frame.pts = static_cast<uint64_t>(pkt->pts * av_q2d(time_base) * 1e6);
        // NOTE(review): named "utc" but sourced from steady_clock, which has
        // an arbitrary epoch — confirm consumers don't expect wall-clock UTC.
        frame.utc = static_cast<uint64_t>(
            std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now().time_since_epoch())
                .count());

        {
            std::lock_guard<std::mutex> lock(callback_mutex);
            if (video_callback) {
                video_callback(frame);
            }
        }

        av_packet_unref(pkt);

        if (!is_running.load()) {
            break;
        }
    }
}

/// Pace one demuxed audio packet against the wall clock and deliver it to the
/// registered audio callback, optionally running it through the configured
/// bitstream filter first.
///
/// Refactor: the filtered and raw paths previously duplicated ~90 lines of
/// pacing/packaging code; both now share the deliver() helper. The shared
/// codec mapping includes MP3 (previously only in the filtered path — the raw
/// path is only reachable for G.711, so behavior is unchanged). Also removes
/// the dead trailing is_running check at the end of this void function.
///
/// @param pkt       demuxed packet (reused for each filtered output).
/// @param total_pts offset (in the stream time base) added to timestamps so
///                  PTS stays monotonic across file loops.
void StreamConverter::ProcessAudioPacket(AVPacket *pkt, uint64_t &total_pts)
{
    AVRational time_base = format_ctx->streams[audio_stream_index]->time_base;

    // Shift into the looped timeline before filtering.
    pkt->pts += total_pts;
    pkt->dts += total_pts;

    // Package one packet, pace it to its presentation time, and invoke the
    // callback. The frame buffer is only valid for the duration of the call.
    auto deliver = [&](AVPacket *p) {
        double pts_time = p->pts * av_q2d(time_base) * 1e6;

        // Anchor the wall-clock schedule to the first frame of each loop pass.
        if (is_first_audio_frame) {
            audio_start_time =
                std::chrono::steady_clock::now() - std::chrono::microseconds(static_cast<int64_t>(pts_time));
            is_first_audio_frame = false;
        }

        // Real-time pacing: sleep until this frame's presentation time.
        auto frame_time = audio_start_time + std::chrono::microseconds(static_cast<int64_t>(pts_time));
        auto now = std::chrono::steady_clock::now();
        if (frame_time > now) {
            std::this_thread::sleep_until(frame_time);
        }

        AudioEncPacket frame;
        std::vector<uint8_t> data;
        data.assign(p->data, p->data + p->size);

        // Re-add the ADTS header for AAC so consumers get self-describing
        // frames (aac_adtstoasc strips it).
        if (audio_codec_ctx->codec_id == AV_CODEC_ID_AAC) {
            std::vector<uint8_t> adts_header = GenerateADTSHeader(p->size, audio_codec_ctx->sample_rate);
            data.insert(data.begin(), adts_header.begin(), adts_header.end());
        }

        frame.buf = (uint8_t *)data.data();
        frame.len = (uint32_t)data.size();

        frame.codecType = (audio_codec_ctx->codec_id == AV_CODEC_ID_AAC)        ? MEDIA_AUDIO_CODEC_AAC
                          : (audio_codec_ctx->codec_id == AV_CODEC_ID_MP3)      ? MEDIA_AUDIO_CODEC_MP3
                          : (audio_codec_ctx->codec_id == AV_CODEC_ID_PCM_ALAW) ? MEDIA_AUDIO_CODEC_G711A
                          : (audio_codec_ctx->codec_id == AV_CODEC_ID_PCM_MULAW) ? MEDIA_AUDIO_CODEC_G711U
                                                                                 : MEDIA_AUDIO_CODEC_UNKOWN;

        frame.frameType = (audio_codec_ctx->codec_id == AV_CODEC_ID_AAC)        ? MEDIA_FRAME_AAC
                          : (audio_codec_ctx->codec_id == AV_CODEC_ID_PCM_ALAW) ? MEDIA_FRAME_G711A
                          : (audio_codec_ctx->codec_id == AV_CODEC_ID_PCM_MULAW) ? MEDIA_FRAME_G711U
                                                                                 : MEDIA_FRAME_UNKOWN;
        frame.sampleRate = static_cast<double>(audio_codec_ctx->sample_rate);
        frame.channels = audio_codec_ctx->channels;
        frame.pts = static_cast<uint64_t>(p->pts * av_q2d(time_base) * 1e6);
        // NOTE(review): named "utc" but sourced from steady_clock (arbitrary
        // epoch) — confirm consumers don't expect wall-clock UTC.
        frame.utc = static_cast<uint64_t>(
            std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now().time_since_epoch())
                .count());

        {
            std::lock_guard<std::mutex> lock(callback_mutex);
            if (audio_callback) {
                audio_callback(frame);
            }
        }
    };

    if (audio_bsf_ctx) {
        // Process audio packet using bitstream filter
        if (av_bsf_send_packet(audio_bsf_ctx.get(), pkt) < 0) {
            LOG_ERROR("Error sending audio packet to bitstream filter.");
            return;
        }

        while (av_bsf_receive_packet(audio_bsf_ctx.get(), pkt) == 0) {
            deliver(pkt);
            av_packet_unref(pkt);

            if (!is_running.load()) {
                break;
            }
        }
    } else {
        // Process raw audio packet (e.g. G.711) — no bitstream filter needed.
        deliver(pkt);
        av_packet_unref(pkt);
    }
}

/// Build a 7-byte ADTS header (MPEG-4, no CRC) for one AAC-LC frame.
///
/// @param aac_data_length size of the raw AAC payload in bytes.
/// @param sample_rate     sample rate in Hz; unsupported rates fall back to
///                        the 44.1 kHz index with an error log.
/// @return the 7-byte header to prepend to the payload.
std::vector<uint8_t> StreamConverter::GenerateADTSHeader(int aac_data_length, int sample_rate)
{
    const int profile = 2; // AAC LC (Audio Object Type; stored as profile-1)
    int freqIdx = GetFrequencyIndex(sample_rate);
    if (freqIdx == -1) {
        LOG_ERROR("Unsupported sample rate: {}", sample_rate);
        freqIdx = 4; // fall back to the 44.1 kHz index
    }
    const int chanCfg = audio_codec_ctx->channels;
    const int frameLength = aac_data_length + 7; // payload plus this header

    std::vector<uint8_t> header(7);
    header[0] = 0xFF; // syncword (high 8 bits)
    header[1] = 0xF1; // syncword low nibble, MPEG-4, layer 0, no CRC
    header[2] = static_cast<uint8_t>(((profile - 1) << 6) + (freqIdx << 2) + (chanCfg >> 2));
    header[3] = static_cast<uint8_t>(((chanCfg & 3) << 6) + (frameLength >> 11));
    header[4] = static_cast<uint8_t>((frameLength & 0x7FF) >> 3);
    header[5] = static_cast<uint8_t>(((frameLength & 7) << 5) + 0x1F); // + buffer fullness high bits
    header[6] = 0xFC; // buffer fullness low bits + 0 raw data blocks

    return header;
}

/// Map a sample rate in Hz to its ADTS sampling-frequency index.
///
/// @param sample_rate rate in Hz.
/// @return index 0..12, or -1 for an unsupported rate.
int StreamConverter::GetFrequencyIndex(int sample_rate)
{
    // Table position IS the ADTS index, so a linear scan returns it directly.
    static const int kAdtsRates[] = {96000, 88200, 64000, 48000, 44100, 32000, 24000,
                                     22050, 16000, 12000, 11025, 8000,  7350};
    const int count = static_cast<int>(sizeof(kAdtsRates) / sizeof(kAdtsRates[0]));
    for (int idx = 0; idx < count; ++idx) {
        if (kAdtsRates[idx] == sample_rate) {
            return idx;
        }
    }
    return -1; // Unsupported sample rate
}

/// Stop the worker thread (if any) and release every FFmpeg resource, then
/// re-arm the first-frame pacing flags for a future Open()/Start() cycle.
void StreamConverter::Close()
{
    Stop();

    // Each reset() invokes the matching FFmpeg free function via the
    // deleters installed in the constructor.
    audio_bsf_ctx.reset();
    audio_codec_ctx.reset();
    video_bsf_ctx.reset();
    video_packet.reset();
    video_codec_ctx.reset();
    format_ctx.reset();

    is_first_audio_frame = true;
    is_first_video_frame = true;
}

/// Fill in codec type, dimensions, and frame rate for the opened video stream.
///
/// @param video output structure, populated on success.
/// @return false when no video stream has been opened, true otherwise.
bool StreamConverter::GetVideoInfo(VideoEncFormat &video)
{
    if (video_stream_index == -1 || !video_codec_ctx) {
        LOG_ERROR("Video stream not initialized.");
        return false;
    }

    // Codec type: only H.264/H.265 are recognized downstream.
    const auto codec_id = video_codec_ctx->codec_id;
    video.codecType = (codec_id == AV_CODEC_ID_H264)   ? MEDIA_VIDEO_CODEC_H264
                      : (codec_id == AV_CODEC_ID_HEVC) ? MEDIA_VIDEO_CODEC_H265
                                                       : MEDIA_VIDEO_CODEC_UNKOWN;

    // Dimensions straight from the decoder context.
    video.width = video_codec_ctx->width;
    video.height = video_codec_ctx->height;

    // Average frame rate reported by the demuxer.
    video.fps = av_q2d(format_ctx->streams[video_stream_index]->avg_frame_rate);

    return true;
}

/// Fill in codec type, sample rate, and channel count for the opened audio
/// stream.
///
/// @param audio output structure, populated on success.
/// @return false when no audio stream has been opened, true otherwise.
bool StreamConverter::GetAudioInfo(AudioEncFormat &audio)
{
    if (audio_stream_index == -1 || !audio_codec_ctx) {
        LOG_ERROR("Audio stream not initialized.");
        return false;
    }

    // Codec type: AAC, MP3, and both G.711 variants are recognized.
    const auto codec_id = audio_codec_ctx->codec_id;
    audio.codecType = (codec_id == AV_CODEC_ID_AAC)        ? MEDIA_AUDIO_CODEC_AAC
                      : (codec_id == AV_CODEC_ID_MP3)      ? MEDIA_AUDIO_CODEC_MP3
                      : (codec_id == AV_CODEC_ID_PCM_ALAW) ? MEDIA_AUDIO_CODEC_G711A
                      : (codec_id == AV_CODEC_ID_PCM_MULAW) ? MEDIA_AUDIO_CODEC_G711U
                                                            : MEDIA_AUDIO_CODEC_UNKOWN;

    // Sample rate and channel count straight from the decoder context.
    audio.sampleRate = audio_codec_ctx->sample_rate;
    audio.channels = audio_codec_ctx->channels;

    return true;
}

} // namespace Hal
} // namespace El