/*
    基于beamcoder实现的视频文件的按帧读取
    1. 目前支持的视频文件格式：ts
    2. decode默认解出来的视频是yuv格式，音频是pcm_fltp格式
    3. 使用filter将yuv转为rgba，将pcm_fltp转为pcm_s16
    4. 为了有一个稳定的帧率输出，使用定时器定时读取输出队列
    5. 为了音视频对齐，目前视频的帧率必须固定为fps=25，音频也为25，一帧视频对应一帧音频
 */
// const beamcoder = require('beamcoder');
// 使用自己编译的beamcoder node模块
const beamcoder = require('D:\\jscode\\beamcoder-master\\build\\Release\\beamcoder.node');
const EventEmitter = require("events");
const {validate_mp4, get_mp4_info} = require("./BeamcoderHelper")

const logger = require("log4js").getLogger('video_helper')

class BufferHelper {
    /**
     * Accumulates raw byte chunks and slices them back out as fixed-size
     * frames. Used here to re-chunk decoded pcm audio so that exactly one
     * audio frame matches one video frame.
     * @param framePerSize  size of one frame in bytes
     */
    constructor(framePerSize) {
        this.framePerSize = framePerSize;
        this.buff = Buffer.alloc(0);
    }

    /**
     * Appends newly received data to the internal buffer. Whether a full
     * frame is available afterwards is checked via hasOneFrame().
     * @param data {Buffer} raw bytes to accumulate
     */
    feedData(data) {
        this.buff = Buffer.concat([this.buff, data]);
    }

    /**
     * Whether at least one complete frame is currently buffered.
     * @returns {boolean}
     */
    hasOneFrame() {
        return this.buff.length >= this.framePerSize;
    }

    /**
     * Consumes and returns one complete frame from the buffer, or undefined
     * when less than a full frame is available.
     * @returns {Buffer|undefined} an independent copy of the frame bytes
     */
    getOneFrame() {
        if (!this.hasOneFrame()) {
            console.log('frame not available, skip getFrame request');
            return;
        }

        //Buffer#subarray replaces the deprecated Buffer#slice (same view
        //semantics); Buffer.from copies, so the returned frame does not alias
        //the internal buffer.
        let frame = Buffer.from(this.buff.subarray(0, this.framePerSize))
        this.buff = this.buff.subarray(this.framePerSize)
        return frame;
    }
}

class VideoFrameReader extends EventEmitter {
    /**
     * Reads a video file frame by frame (via beamcoder), converts video to
     * rgba and audio to pcm_s16, and pairs one audio frame with one video
     * frame so the consumer receives aligned audio/video at a fixed fps.
     *
     * @param sessionId         session id used for log correlation
     * @param videoFps          video frame rate (audio is aligned to it)
     * @param videoFile         path of the video file to read
     * @param videoWidth        output video width
     * @param videoHeight       output video height
     * @param audioSampleRate   audio sample rate
     * @param audioChannelCount audio channel count
     * @param debug             debug switches
     * @param isAutoEmitFrame   true: frames are pushed out via the
     *                          'audio_video_frame' event; false: the caller
     *                          pulls frames manually via getFrame().
     */
    constructor({
                    sessionId,
                    videoFps,
                    videoFile,
                    videoWidth,
                    videoHeight,
                    audioSampleRate = 48000,
                    audioChannelCount = 2,
                    isAutoEmitFrame = true,
                    debug = {ffmpeg: false, pipe: false, sum: false}
                }) {
        super();
        this.sessionId = sessionId;

        //true: emit frames through events; false: caller polls getFrame()
        this.isAutoEmitFrame = isAutoEmitFrame;

        //video parameters
        this.fps = videoFps;
        this.videoFile = videoFile;

        this.videoWidth = videoWidth;
        this.videoHeight = videoHeight;
        //rgba output: 4 bytes per pixel
        this.videoFramePerSize = this.videoWidth * this.videoHeight * 4;
        logger.info(this.sessionId, `VideoHelper,video init, fps:${this.fps}, videoFile:${videoFile}, videoRatio:${videoWidth}x${videoHeight}, videoFramePerSize:${this.videoFramePerSize}`);

        //audio parameters
        this.audioSampleRate = audioSampleRate;
        this.audioChannelCount = audioChannelCount;
        //bytes per frame = sampleRate * channels * bytesPerSample(2, s16le) / fps
        this.audioFramePerSize = audioSampleRate * audioChannelCount * 2 / videoFps;
        logger.info(this.sessionId, `VideoHelper,audio init,audioSampleRate:${audioSampleRate}, channels:${audioChannelCount}, audioFramePerSize:${this.audioFramePerSize}`);

        //decoded video frames waiting to be paired with audio
        this.videoFrameArray = [];
        //re-chunks decoded audio so one audio frame covers exactly one video
        //frame interval (e.g. 40ms of audio per 40ms video frame)
        this.audioBufferHelper = new BufferHelper(this.audioFramePerSize);

        //send queue of already-aligned audio/video frame pairs
        this.audioVideoSendArray = [];

        //timer handle used by startFrameSender()
        this.sendInterval = undefined;

        //exit flag; stops the background reader loop
        this.isExit = false;

        this.debug = debug;
    }

    /**
     * Builds the audio filter used to convert decoded pcm_fltp samples to s16.
     * @param audioParam {{sampleRate, channelLayout}} target audio parameters
     * @returns {Promise<Filterer>}
     * @private
     */
    async _getAudioFilter(audioParam = {}) {
        //BUGFIX: the previous default value `{sampleRate, channelLayout}`
        //referenced undeclared variables and threw a ReferenceError whenever
        //this method was called without an argument.
        let channelLayout = audioParam?.channelLayout ?? 'stereo';
        //BUGFIX: default was the string '48000'; the sample rate is numeric
        let sampleRate = audioParam?.sampleRate ?? 48000;
        let timeBase = [1, sampleRate]
        return await beamcoder.filterer({
            filterType: 'audio',
            //input: planar float samples as produced by the decoder
            inputParams: [
                {
                    sampleFormat: 'fltp',
                    sampleRate,
                    channelLayout,
                    timeBase
                }
            ],
            //output: interleaved signed 16-bit samples
            outputParams: [
                {
                    sampleFormat: 's16',
                    sampleRate,
                    channelLayout,
                    timeBase
                }
            ],
            filterSpec: `aformat=sample_fmts=s16:channel_layouts=${channelLayout}`
        });
    }

    /**
     * Builds the video filter used to convert yuv frames to rgba and crop
     * them to the requested output size.
     * @param videoInfo     source video info (width/height of the input)
     * @param videoParam    target video parameters (crop width/height)
     * @returns {Promise<*>}
     * @private
     */
    async _getVideoFilter(videoInfo, videoParam) {
        return await beamcoder.filterer({
            filterType: 'video',
            //input parameters
            inputParams: [
                {
                    width: videoInfo.width,
                    height: videoInfo.height,
                    pixelFormat: 'yuv420p',
                    sample_aspect_ratio: 0,
                    //NOTE(review): hard-coded 1/25 time base — matches the
                    //fps=25 restriction stated in the file header; confirm
                    //before supporting other frame rates
                    timeBase: [1, 25],
                    pixelAspect: [videoInfo.width, videoInfo.height]
                }
            ],
            //output parameters, rgba is required downstream
            outputParams: [
                {
                    pixelFormat: 'rgba'
                }
            ],
            filterSpec: `crop=${videoParam.width}:${videoParam.height}`
        });
    }

    /**
     * Starts the reader: validates the file, launches the background frame
     * reading loop and, when isAutoEmitFrame is set, the timed frame sender.
     * @returns {Promise<void>}
     * @throws a string message when file validation fails
     */
    async start() {
        let result = await validate_mp4(this.videoFile);
        if (!result) {
            //NOTE(review): thrown as a plain string; prefer `new Error(...)`,
            //kept as-is so existing catch sites keep seeing a string
            throw 'invalid video width, must be in multiples of 8';
        }

        this.mediaInfo = await get_mp4_info(this.videoFile);
        logger.info(this.sessionId, `video file:${this.videoFile} info is:${JSON.stringify(this.mediaInfo)}`);

        //start reading audio/video frames in the background
        await this.startFrameReader();

        if (this.isAutoEmitFrame) {
            //start the fixed-rate sender
            await this.startFrameSender();
        } else {
            logger.info(this.sessionId, 'no need to start SendFrameInterval, you need to get by getFrame() method');
        }
    }

    /**
     * Returns one aligned audio/video frame pair (manual pull mode only).
     * @returns {*} the next frame pair, or undefined when the queue is empty
     */
    getFrame() {
        if (this.isAutoEmitFrame) {
            //BUGFIX: message used to say 'isAudioEmitFrame' (flag is isAutoEmitFrame)
            logger.warn(this.sessionId, 'fail to get frame for isAutoEmitFrame=true');
            return;
        }

        return this.audioVideoSendArray.shift();
    }

    /**
     * Emits aligned audio/video frame pairs at the configured fps via the
     * 'audio_video_frame' event.
     * @returns {Promise<void>}
     */
    async startFrameSender() {
        logger.info(this.sessionId, 'start to send packets');
        this.sendInterval = setInterval(() => {
            let frame = this.audioVideoSendArray.shift();
            if (frame) {
                this.emit('audio_video_frame', frame);
            } else {
                logger.warn(this.sessionId, 'no available frame, skip one');
            }
        }, 1000 / this.fps);
    }

    /**
     * Starts the background loop that pulls decoded frames from frameReader()
     * and fills the send queue with aligned audio/video pairs.
     * @returns {Promise<void>}
     */
    async startFrameReader() {
        //queue check interval in ms
        let checkInterval = 2;
        let isFirstFrame = true;

        let reader = this.frameReader();
        let that = this;
        //periodically check the send queue; when it runs low, read ahead and
        //refill it with up to 5 aligned audio/video pairs
        setTimeout(async function readFunc() {
            if (that.isExit) {
                logger.info(that.sessionId, 'frame read is over, stop reader');
                return;
            }

            //refill only when the queue is running low
            if (that.audioVideoSendArray.length < 2) {
                logger.debug(that.sessionId, 'audioVideoSendArray.length is < 2, need to read more')
                //top the queue back up to 5 aligned pairs
                while (that.audioVideoSendArray.length < 5) {
                    let nextValue = await reader.next();
                    if (!nextValue.done) {
                        let {type, data} = nextValue.value;
                        if (type === 'audio') {
                            that.audioBufferHelper.feedData(data);
                        } else if (type === 'video') {
                            that.videoFrameArray.push(data);
                        }

                        //after each read, check whether a complete aligned
                        //pair is available; if so, move it to the send queue
                        //BUGFIX: hasOneFrame() returns a boolean, the old
                        //`> 0` comparison was comparing a boolean to a number
                        if (that.audioBufferHelper.hasOneFrame() && that.videoFrameArray.length > 0) {
                            that.debug.sum && logger.info(`终于凑够了一帧，放到发送队列中, 发送队列大小${that.audioVideoSendArray.length},  videoFrameArray:${that.videoFrameArray.length}`);
                            let fullData = {
                                audioFrame: that.audioBufferHelper.getOneFrame(),
                                videoFrame: that.videoFrameArray.shift(),
                                isFirstFrame,
                                //whether the file has been fully read
                                isReadOver: false
                            }
                            that.audioVideoSendArray.push(fullData);

                            if (isFirstFrame) {
                                isFirstFrame = false;
                            }
                        }
                    } else {
                        logger.info(that.sessionId, 'video file is over');
                        //push a terminal marker frame so the consumer knows
                        //the file has been fully read
                        that.audioVideoSendArray.push({isReadOver: true});
                        return;
                    }
                }
            }

            setTimeout(readFunc, checkInterval);
        }, checkInterval);
    }

    /**
     * Async generator decoding the file's audio/video packets. Yields
     * {type: 'video', data} (rgba bytes) and {type: 'audio', data} (s16 pcm).
     * @returns {Promise<void>}
     */
    async* frameReader() {
        let demuxer = await beamcoder.demuxer(this.videoFile);
        //NOTE(review): assumes stream 0 is video and stream 1 is audio —
        //confirm for every supported container
        let videoDecoder = beamcoder.decoder({params:demuxer.streams[0].codecpar});
        let audioDecoder = beamcoder.decoder({params:demuxer.streams[1].codecpar});

        //converts decoded video from yuv to rgba
        let videoFilter = await this._getVideoFilter(this.mediaInfo.videoInfo, {width: this.videoWidth, height: this.videoHeight});
        let channelLayout = this.audioChannelCount === 2 ? 'stereo' : 'mono';
        //converts decoded audio from float planar to s16
        let audioFilter = await this._getAudioFilter({sampleRate: this.audioSampleRate, channelLayout});

        let audioCount = 0, videoCount = 0;
        let packet = {};

        /**
         * Converts one decoded video result from yuv to rgba.
         * @param frames decoder output passed through the video filter
         * @returns {{data: Buffer, type: string}|undefined}
         */
        async function parseVideoFrame(frames) {
            //decoded frames are yuv; run them through the filter to get rgba
            let filteredData = await videoFilter.filter([frames]);
            let realData = filteredData[0]?.frames[0]?.data[0];
            if (realData) {
                //drop the trailing alignment padding
                let finalData = realData.slice(0, this.videoWidth * this.videoHeight * 4);
                return {type: 'video', data: finalData}
            } else {
                console.log(this.sessionId, `fail to receive video frame, data is null`);
            }
        }

        /**
         * Converts one decoded audio result from pcm_fltp to pcm_s16.
         * @param audioFrames decoder output passed through the audio filter
         * @returns {{data: *, type: string}|undefined}
         */
        async function parseAudioFrame(audioFrames) {
            let filteredData = await audioFilter.filter([audioFrames]);
            let realAudioData = filteredData[0]?.frames[0]?.data[0];
            if (realAudioData) {
                //strip the padding appended to the buffer for alignment:
                //real size = nb_samples(per channel) * channels * bytesPerSample
                let realSize = filteredData[0].frames[0].nb_samples * this.audioChannelCount * 2;
                realAudioData = realAudioData.slice(0, realSize);
                return {type: 'audio', data: realAudioData}
            } else {
                console.warn(this.sessionId, `fail to receive audio frame, data is null`);
            }
        }

        //keep reading raw packets; a null read means end of file
        while (packet = await demuxer.read()) {
            if (packet.stream_index === 0) {
                let frames = await videoDecoder.decode(packet);
                //the first few decodes may produce no frames, skip them
                if (frames.frames.length === 0) {
                    continue;
                }
                //convert the video frame to rgba
                let videoData = await parseVideoFrame.call(this, frames);
                if (videoData) {
                    yield videoData;
                    videoCount++;
                }
            } else if (packet.stream_index === 1) {
                let audioFrames = await audioDecoder.decode(packet);
                //convert the audio frame to pcm s16
                let audioData = await parseAudioFrame.call(this, audioFrames);
                if (audioData) {
                    yield audioData;
                    audioCount++;
                }
            }
        }
        console.log(`===================================`);
        console.log(`videoCount:${videoCount}, audioCount:${audioCount}`);

        //drain the frames still buffered inside the decoders
        let frames = await videoDecoder.flush(); // Must tell the videoDecoder when we are done
        if (frames && frames.frames) {
            for (let i = 0; i < frames.frames.length; i++) {
                //NOTE(review): the flush path feeds a single frame to the
                //filter while the main loop feeds the whole decode result —
                //confirm against the beamcoder filter API
                let parsed = await parseVideoFrame.call(this, frames.frames[i]);
                //BUGFIX: guard against undefined results; the consumer in
                //startFrameReader destructures every yielded value and would
                //crash on undefined
                if (parsed) {
                    yield parsed;
                }
            }
            console.log(this.sessionId, 'finally video frame flush', frames.total_time, frames.frames.length);
        }
        let audioFrames = await audioDecoder.flush()
        if (audioFrames && audioFrames.frames) {
            for (let i = 0; i < audioFrames.frames.length; i++) {
                //BUGFIX: same undefined guard as the video flush above
                let parsed = await parseAudioFrame.call(this, audioFrames.frames[i]);
                if (parsed) {
                    yield parsed;
                }
            }
        }
    }


    /**
     * Stops the background reader loop and the send timer.
     */
    close() {
        this.isExit = true;

        if (this.sendInterval) {
            clearInterval(this.sendInterval);
        }
    }
}

// Public API of this module.
module.exports = { VideoFrameReader };
