#include "MySQLClient.hpp"

#include <chrono>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>

using std::cout;
using std::endl;
using std::string;
using namespace std::chrono;

extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libavutil/error.h>
#include <libavutil/time.h>
}

// Custom deleter so an AVFormatContext can be owned by std::unique_ptr.
// Closes the attached AVIO handle (if any) before freeing the context.
// NOTE(review): this deleter is also used for input contexts opened with
// avformat_open_input(); the canonical teardown for those is
// avformat_close_input() — confirm this manual pb-close + free is equivalent
// for the FFmpeg version in use.
struct AVFormatContextDeleter {
    void operator()(AVFormatContext* ctx) const {
        if (!ctx) {
            return;
        }
        if (ctx->pb) {
            avio_closep(&ctx->pb);
        }
        avformat_free_context(ctx);
    }
};

using AVFormatContextPtr = std::unique_ptr<AVFormatContext, AVFormatContextDeleter>;

// Deleter adapting av_dict_free() for use with std::unique_ptr<AVDictionary>.
struct AVDictionaryDeleter {
    void operator()(AVDictionary* dict) const {
        // av_dict_free takes a pointer-to-pointer and nulls it out;
        // passing a null dictionary is a safe no-op.
        av_dict_free(&dict);
    }
};


// Current wall-clock time as whole seconds since the Unix epoch.
long Timestamp_s(){
    const auto since_epoch = std::chrono::system_clock::now().time_since_epoch();
    return std::chrono::duration_cast<std::chrono::seconds>(since_epoch).count();
}

// Current wall-clock time as milliseconds since the Unix epoch.
// NOTE(review): `long` is 32-bit on LLP64 platforms (Windows), which cannot
// hold current epoch milliseconds — confirm the target is LP64 (Linux).
long Timestamp_ms(){
    const auto since_epoch = std::chrono::system_clock::now().time_since_epoch();
    return std::chrono::duration_cast<std::chrono::milliseconds>(since_epoch).count();
}

// Current wall-clock time as microseconds since the Unix epoch.
// NOTE(review): overflows a 32-bit `long` (see Timestamp_ms) — confirm LP64.
long Timestamp_us(){
    const auto since_epoch = std::chrono::system_clock::now().time_since_epoch();
    return std::chrono::duration_cast<std::chrono::microseconds>(since_epoch).count();
}

using AVDictionaryPtr = std::unique_ptr<AVDictionary, AVDictionaryDeleter>;

// 自定义异常类
class FFmpegException : public std::runtime_error {
public:
    FFmpegException(int errorCode, const std::string& message)
        : std::runtime_error(message + ": " + avErrorToString(errorCode)),
        errorCode_(errorCode) {}

    int errorCode() const { return errorCode_; }

private:
    static std::string avErrorToString(int errnum) {
        char buf[AV_ERROR_MAX_STRING_SIZE];
        av_make_error_string(buf, sizeof(buf), errnum);
        return buf;
    }

    int errorCode_;
};

// Dump one packet's timing/stream fields to stdout, prefixed with `tag`.
static void log_packet(const AVPacket* pkt, const std::string& tag) {
    std::ostringstream msg;
    msg << tag << ": pts: " << pkt->pts
        << ", dts: " << pkt->dts
        << ", duration: " << pkt->duration
        << ", index: " << pkt->stream_index << "\n";
    std::cout << msg.str();
}

// Create and open an mpegts output file mirroring the mapped streams of
// `inFmtCtx`, and write its header.
//
// filename      : path of the segment file to create.
// inFmtCtx      : opened input context whose codec parameters are copied.
// streamMapping : input-stream index -> output-stream index (-1 = dropped).
// video_frame_duration / audio_frame_duration (out):
//     one frame's duration expressed in the corresponding *output* stream
//     time_base; the caller uses these to patch non-monotonic timestamps.
//
// Throws FFmpegException / std::runtime_error on failure. The returned
// smart pointer closes the AVIO handle and frees the context on reset.
AVFormatContextPtr create_output_file(
                                      const std::string& filename,
                                      AVFormatContext* inFmtCtx,
                                      const std::vector<int>& streamMapping,
                                      int64_t& video_frame_duration,
                                      int64_t& audio_frame_duration
                                     ) {
    AVFormatContext* rawOutCtx = nullptr;
    int ret = avformat_alloc_output_context2(&rawOutCtx, nullptr, "mpegts", filename.c_str());
    if (!rawOutCtx) {
        // Include the FFmpeg error text when a code is available.
        if (ret < 0) {
            throw FFmpegException(ret, "Could not create output context");
        }
        throw std::runtime_error("Could not create output context");
    }

    AVFormatContextPtr outCtx(rawOutCtx);

    // Create output streams and copy codec parameters verbatim.
    for (unsigned int i = 0; i < inFmtCtx->nb_streams; ++i) {
        if (streamMapping[i] < 0) continue;

        AVStream* inStream = inFmtCtx->streams[i];
        AVStream* outStream = avformat_new_stream(outCtx.get(), nullptr);
        if (!outStream) {
            throw std::runtime_error("Failed allocating output stream");
        }

        ret = avcodec_parameters_copy(outStream->codecpar, inStream->codecpar);
        if (ret < 0) {
            throw FFmpegException(ret, "Failed to copy codec parameters");
        }

        // Let the mpegts muxer choose its own tag for the copied codec.
        outStream->codecpar->codec_tag = 0;
    }

    // Open the underlying file unless the muxer does its own I/O.
    if (!(outCtx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&outCtx->pb, filename.c_str(), AVIO_FLAG_WRITE);
        if (ret < 0) {
            throw FFmpegException(ret, "Could not open output file '" + filename + "'");
        }
    }

    ret = avformat_write_header(outCtx.get(), nullptr);
    if (ret < 0) {
        throw FFmpegException(ret, "Error writing header");
    }

    // Compute per-frame durations AFTER avformat_write_header(): the muxer
    // may rewrite each stream's time_base there (mpegts uses 1/90000), so
    // rescaling against the pre-header default would give wrong durations.
    for (unsigned int i = 0; i < inFmtCtx->nb_streams; ++i) {
        if (streamMapping[i] < 0) continue;

        AVStream* inStream = inFmtCtx->streams[i];
        AVCodecParameters* inCodecpar = inStream->codecpar;
        AVStream* outStream = outCtx->streams[streamMapping[i]];

        if (inCodecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVRational frame_rate = inStream->avg_frame_rate;
            if (frame_rate.num == 0 || frame_rate.den == 0) {
                frame_rate = inStream->r_frame_rate;  // fallback estimate
            }

            if (frame_rate.num != 0 && frame_rate.den != 0) {
                // One frame lasts den/num seconds.
                AVRational frame_duration = {frame_rate.den, frame_rate.num};
                video_frame_duration = av_rescale_q(1, frame_duration, outStream->time_base);
            } else {
                // Frame rate unknown: assume 25 fps.
                video_frame_duration = av_rescale_q(1, (AVRational){1, 25}, outStream->time_base);
            }
        }
        else if (inCodecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            // One audio frame = frame_size samples at sample_rate (1024 if unknown).
            int samples_per_frame = inCodecpar->frame_size > 0 ? inCodecpar->frame_size : 1024;
            AVRational one_frame = {samples_per_frame, inCodecpar->sample_rate};
            audio_frame_duration = av_rescale_q(1, one_frame, outStream->time_base);
        }
    }

    return outCtx;
}


// Convert an epoch timestamp in milliseconds to "YYYY-MM-DD HH:MM:SS[.mmm]"
// in Beijing time (UTC+8). The ".mmm" suffix is appended only when the
// millisecond remainder is non-zero.
std::string timestampToDateTime(long long timestamp_ms) {
    using namespace std::chrono;

    // Build the UTC time point, then shift by +8h so gmtime yields UTC+8.
    const auto utc_tp = system_clock::time_point(
        seconds(timestamp_ms / 1000) + nanoseconds((timestamp_ms % 1000) * 1000000));
    const std::time_t tt = system_clock::to_time_t(utc_tp + hours(8));

    std::tm tm;
#ifdef _WIN32
    gmtime_s(&tm, &tt);  // thread-safe variant on Windows
#else
    gmtime_r(&tt, &tm);  // thread-safe variant on POSIX
#endif

    std::ostringstream out;
    out << std::put_time(&tm, "%Y-%m-%d %H:%M:%S");

    // Append fractional milliseconds only when present.
    const long long ms = timestamp_ms % 1000;
    if (ms > 0) {
        out << '.' << std::setfill('0') << std::setw(3) << ms;
    }

    return out.str();
}

// Build a segment file name: <base> + current Beijing date-time + ".ts".
// NOTE(review): the date-time part contains spaces and ':' characters —
// awkward for shell use and invalid in Windows paths; confirm intended.
std::string generate_filename(const std::string& base) {
    return base + timestampToDateTime(Timestamp_ms()) + ".ts";
}

void recorded(const string & url,int cameraId,int channel,const string & camera_serial_no) {
    const std::string & inFilename =url ;
    string outFilenameBase = "./videos/";

    outFilenameBase += camera_serial_no+"/channel"+std::to_string(channel)+"/";


    int packetCount = 0;
    int segmentIndex = 0;
    constexpr int64_t segmentDuration = 60 * 1000000; // 60秒(微秒)
                                                      //constexpr int64_t totalDuration = 30 * 60 * 1000000; // 30分钟(微秒)

                                                      // 1. 打开输入文件
    AVFormatContext* rawInCtx = nullptr;
    AVDictionary* rawOptions = nullptr;
    av_dict_set(&rawOptions, "stimeout", "5000000", 0);
    av_dict_set(&rawOptions, "buffer_size", "1024000", 0);

    AVDictionaryPtr options(rawOptions);
    int ret = avformat_open_input(&rawInCtx, inFilename.c_str(), nullptr, &rawOptions);
    options.release(); // 释放所有权，avformat_open_input会接管

    if (ret < 0) {
        throw FFmpegException(ret, "avformat_open_input error");
    }

    AVFormatContextPtr inCtx(rawInCtx);

    // 2. 查找流信息
    ret = avformat_find_stream_info(inCtx.get(), nullptr);
    if (ret < 0) {
        throw FFmpegException(ret, "avformat_find_stream_info error");
    }

    // 3. 打印输入信息
    av_dump_format(inCtx.get(), -1, inFilename.c_str(), 0);

    // 4. 创建流映射
    std::vector<int> streamMapping(inCtx->nb_streams, -1);
    int streamIndex = 0;

    for (unsigned int i = 0; i < inCtx->nb_streams; ++i) {
        AVCodecParameters* codecpar = inCtx->streams[i]->codecpar;
        if (codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
            codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
            codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) 
        {
            streamMapping[i] = streamIndex++;
        }
    }

    // 5. 创建第一个输出文件
    int64_t videoFrameDuration = 0;
    int64_t audioFrameDuration = 0;
    std::string currentFilename = generate_filename(outFilenameBase);
    auto outCtx = create_output_file(
                                     currentFilename, inCtx.get(), streamMapping, 
                                     videoFrameDuration, audioFrameDuration
                                    );

    std::cout << "Created output file: " << currentFilename << "\n";
    std::cout << "Video frame duration: " << videoFrameDuration 
        << ", Audio frame duration: " << audioFrameDuration << "\n";

    // 6. 开始录制
    int64_t lastVideoDts = AV_NOPTS_VALUE;
    int64_t lastAudioDts = AV_NOPTS_VALUE;
    const auto startTime = av_gettime();
    auto segmentStartTime = startTime;

    AVPacket packet;
    av_init_packet(&packet);

    long begin_time = Timestamp_us();
    long end_time = begin_time;
    MySQLClient SQLConn;
    while (true) {
        // 检查总录制时间
        const auto currentTime = av_gettime();
        /*if (currentTime - startTime >= totalDuration) {
          std::cout << "Total recording time reached, stopping.\n";
          break;
          }*/

        // 检查是否需要切换到新文件
        if (currentTime - segmentStartTime >= segmentDuration) {
            std::cout << "Segment time reached, switching to new file.\n";

            // 关闭当前文件
            av_write_trailer(outCtx.get());
            outCtx.reset(); // 释放当前输出上下文

            //结束时间
            end_time += currentTime - segmentStartTime;
            char temp[1024]={0};
            sprintf(temp,"INSERT INTO record_file (NAME, start_time, end_time, camera_id, channel) VALUES ('%s', %ld, %ld, %d, %d)",currentFilename.c_str(),begin_time,end_time,cameraId,channel);
            begin_time = end_time;
            SQLConn.writeOperationQuery(temp);


            // 创建新文件
            currentFilename = generate_filename(outFilenameBase);
            outCtx = create_output_file(
                                        currentFilename, inCtx.get(), streamMapping, 
                                        videoFrameDuration, audioFrameDuration
                                       );

            std::cout << "Created new output file: " << currentFilename << "\n";
            segmentStartTime = currentTime;

            // 重置时间戳状态
            lastVideoDts = AV_NOPTS_VALUE;
            lastAudioDts = AV_NOPTS_VALUE;
        }

        // 读取数据包
        ret = av_read_frame(inCtx.get(), &packet);
        if (ret < 0) {
            if (ret == AVERROR_EOF) {
                std::cout << "End of input file\n";
            } else {
                std::cerr << "av_read_frame error: " << "\n";
            }
            break;
        }

        // 自动释放数据包

        // 跳过不需要的流
        if (packet.stream_index >= static_cast<int>(streamMapping.size()) || 
            streamMapping[packet.stream_index] < 0) {
            continue;
        }

        // 处理数据包
        AVStream* inStream = inCtx->streams[packet.stream_index];
        const int newStreamIndex = streamMapping[packet.stream_index];
        AVStream* outStream = outCtx->streams[newStreamIndex];
        packet.stream_index = newStreamIndex;

        // 时间戳转换
        packet.pts = av_rescale_q_rnd(packet.pts, inStream->time_base, outStream->time_base,
                                      static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        packet.dts = av_rescale_q_rnd(packet.dts, inStream->time_base, outStream->time_base,
                                      static_cast<AVRounding>(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        packet.duration = av_rescale_q(packet.duration, inStream->time_base, outStream->time_base);
        packet.pos = -1;

        // 修正时间戳
        if (outStream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (lastVideoDts != AV_NOPTS_VALUE && packet.dts <= lastVideoDts) {
                packet.dts = lastVideoDts + videoFrameDuration;
                if (packet.pts != AV_NOPTS_VALUE && packet.pts < packet.dts) {
                    packet.pts = packet.dts;
                }
            }
            if (packet.dts != AV_NOPTS_VALUE) {
                lastVideoDts = packet.dts;
            }
        } 
        else if (outStream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (lastAudioDts != AV_NOPTS_VALUE && packet.dts <= lastAudioDts) {
                packet.dts = lastAudioDts + audioFrameDuration;
                if (packet.pts != AV_NOPTS_VALUE && packet.pts < packet.dts) {
                    packet.pts = packet.dts;
                }
            }
            if (packet.dts != AV_NOPTS_VALUE) {
                lastAudioDts = packet.dts;
            }
        }

        // 写入数据包
        ret = av_interleaved_write_frame(outCtx.get(), &packet);
        if (ret < 0) {
            std::cerr << "Error muxing packet: "  << "\n";
        }

        packetCount++;

        if (packetCount % 300 == 0) {
        //    std::cout << "Processed " << packetCount << " packets, current file: " 
          //      << currentFilename << "\n";
        }
        av_packet_unref(&packet);
    }

    // 7. 写入文件尾
    if (outCtx) {
        av_write_trailer(outCtx.get());
    }

    std::cout << "Recording completed. Total packets processed: " << packetCount << "\n";
}





