//
// Created by linciping on 2020/9/15.
//
#include "rtmp_output_picture.h"

RtmpPool *rtmpPool = NULL;

/**
 * Print a human-readable message for an FFmpeg error code.
 *
 * @param errorCode negative AVERROR code returned by an FFmpeg call
 * @param tag       caller-supplied label printed before the message
 * @return always 0
 */
int printfError(int errorCode, const char *tag) {
    /* Stack buffer: the original malloc'd 20 bytes and leaked them on every
     * call. 128 bytes comfortably fits FFmpeg's error strings. */
    char error[128];
    av_strerror(errorCode, error, sizeof(error));
    printf("%s err msg->%s\n", tag, error);
    return 0;
}

/*
 * Dump a packet's timing fields (pts/dts/duration, raw and in seconds)
 * to stdout, prefixed by tag (e.g. "in", "out", "encode").
 */
void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag) {
    AVRational *tb = &fmt_ctx->streams[pkt->stream_index]->time_base;
    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, tb),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, tb),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, tb),
           pkt->stream_index);
}


/*
 * Release the trio of resources used for a snapshot pass: the raw pixel
 * buffer plus the source and converted frames.
 * NOTE(review): av_frame_free() NULLs only the local pointer copies; the
 * caller's own pointers are left dangling after this returns.
 */
void outputRtmp2PictureFree(AVFrame *frame, AVFrame *frameYUV, unsigned char *out_buffer) {
    av_free(out_buffer);       /* pixel buffer that backs frameYUV's planes */
    av_frame_free(&frameYUV);  /* converted (YUV420P) frame */
    av_frame_free(&frame);     /* decoded source frame */
}


/**
 * Encode one YUV420 frame into a single-image MJPEG (JPEG) file.
 *
 * @param codecContext decoder context of the source stream; supplies the
 *                     default dimensions when width/height are 0
 * @param picturePath  output .jpg path
 * @param frameYUV     frame whose YUV420P planes are encoded (planes are
 *                     borrowed, not copied or freed)
 * @param width        target width, or 0 to inherit codecContext->width
 * @param height       target height, or 0 to inherit codecContext->height
 * @return 0 on success, CREATE_PICTURE_ERROR on failure
 */
int createPicture(AVCodecContext *codecContext, const char *picturePath, AVFrame *frameYUV, int width, int height) {
    int rc = CREATE_PICTURE_ERROR;
    AVCodecContext *picCodecContext = NULL;
    AVFrame *picture = NULL;
    AVPacket *packet = NULL;
    int headerWritten = 0;

    AVFormatContext *picFormatContext = avformat_alloc_context();
    if (picFormatContext == NULL) {
        return CREATE_PICTURE_ERROR;
    }
    AVOutputFormat *outputFormat = av_guess_format("mjpeg", NULL, NULL);
    if (outputFormat == NULL) {
        printf("codec not found.\n");
        goto end;
    }
    picFormatContext->oformat = outputFormat;
    if (avio_open(&picFormatContext->pb, picturePath, AVIO_FLAG_READ_WRITE) < 0) {
        printf("open output file error.\n");
        goto end;
    }
    AVStream *pictureStream = avformat_new_stream(picFormatContext, NULL);
    if (pictureStream == NULL) {
        goto end;
    }
    AVCodec *picCodec = avcodec_find_encoder(outputFormat->video_codec);
    if (!picCodec) {
        printf("codec not found.\n");
        goto end;
    }
    /* Own encoder context instead of the deprecated pictureStream->codec,
     * matching the codecpar-based API used elsewhere in this file. */
    picCodecContext = avcodec_alloc_context3(picCodec);
    if (picCodecContext == NULL) {
        goto end;
    }
    picCodecContext->codec_id = outputFormat->video_codec;
    picCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
    picCodecContext->pix_fmt = AV_PIX_FMT_YUVJ420P;
    /* Fixed: the original assigned a non-zero width to ->height and left
     * ->width uninitialized. 0 means "inherit from the source stream". */
    picCodecContext->width = (width == 0) ? codecContext->width : width;
    picCodecContext->height = (height == 0) ? codecContext->height : height;
    picCodecContext->time_base.num = 1;
    picCodecContext->time_base.den = 25;
    if (avcodec_open2(picCodecContext, picCodec, NULL) < 0) {
        printf("open codec error.\n");
        goto end;
    }
    avcodec_parameters_from_context(pictureStream->codecpar, picCodecContext);
    av_dump_format(picFormatContext, 0, picturePath, 1);

    picture = av_frame_alloc();
    if (picture == NULL) {
        goto end;
    }
    picture->format = picCodecContext->pix_fmt;
    picture->width = picCodecContext->width;
    picture->height = picCodecContext->height;
    /* Borrow the caller's planes; the original allocated a throwaway buffer
     * only to compute line sizes, then overwrote the data pointers anyway. */
    av_image_fill_linesizes(picture->linesize, picCodecContext->pix_fmt, picCodecContext->width);
    picture->data[0] = frameYUV->data[0];
    picture->data[1] = frameYUV->data[1];
    picture->data[2] = frameYUV->data[2];

    if (avformat_write_header(picFormatContext, NULL) < 0) {
        printf("open output file error.\n");
        goto end;
    }
    headerWritten = 1;

    /* Fixed: av_malloc(sizeof(AVPacket)) left the packet uninitialized. */
    packet = av_packet_alloc();
    if (packet == NULL) {
        goto end;
    }
    int ret = avcodec_send_frame(picCodecContext, picture);
    if (ret < 0) {
        printfError(ret, "avcodec_send_frame");
        goto end;
    }
    ret = avcodec_receive_packet(picCodecContext, packet);
    if (ret < 0) {
        printfError(ret, "avcodec_receive_packet");
        goto end;
    }
    packet->stream_index = pictureStream->index;
    if (av_write_frame(picFormatContext, packet) < 0) {
        goto end;
    }
    printf("encode successful.\n");
    rc = 0;

end:
    /* Single cleanup path — the original leaked the format context, IO
     * handle, frame and packet on every early return. */
    if (headerWritten) {
        av_write_trailer(picFormatContext);
    }
    av_packet_free(&packet);
    av_frame_free(&picture);           /* does not free the borrowed planes */
    avcodec_free_context(&picCodecContext);
    if (picFormatContext->pb != NULL) {
        avio_close(picFormatContext->pb);
    }
    avformat_free_context(picFormatContext);
    return rc;
}

/**
 * Decode a previously recorded video file and write JPEG snapshots at a
 * fixed time interval.
 * @param outputDirPath directory containing the video; also receives the pictures
 * @param videoName file name of the video inside outputDirPath
 * @param pictureInterval seconds between two snapshots (must be > 0)
 * @param width snapshot width (0 = source width)
 * @param height snapshot height (0 = source height)
 * @return 0 on success, CREATE_PICTURE_ERROR on failure
 */
int outputRtmp2Picture(const char *outputDirPath, const char *videoName, int pictureInterval, int width, int height) {
    int rc = CREATE_PICTURE_ERROR;
    char *videoFilePath = NULL;
    char *cacheVideoName = NULL;
    AVFormatContext *formatContext = NULL;
    AVCodecContext *codecContext = NULL;
    AVFrame *frame = NULL;
    AVFrame *frameYuv = NULL;
    AVPacket *packet = NULL;
    struct SwsContext *swsContext = NULL;
    unsigned char *pictureBuff = NULL;
    int ret;

    /* Guard: pictureIndex below divides by pictureInterval. */
    if (pictureInterval <= 0) {
        printf("pictureInterval must be positive.\n");
        return CREATE_PICTURE_ERROR;
    }
    /* Fixed: malloc(sizeof(strlen(a) + strlen(b))) allocated only
     * sizeof(size_t) bytes, so sprintf overflowed the heap. */
    size_t pathLen = strlen(outputDirPath) + strlen(videoName) + 1;
    videoFilePath = malloc(pathLen);
    cacheVideoName = malloc(strlen(videoName) + 1);  /* fixed: +1 for the NUL */
    if (videoFilePath == NULL || cacheVideoName == NULL) {
        goto end;
    }
    snprintf(videoFilePath, pathLen, "%s%s", outputDirPath, videoName);
    strcpy(cacheVideoName, videoName);
    /* Base name = videoName up to the first '.'; strtok mutates the copy. */
    const char *realVideoName = strtok(cacheVideoName, ".");
    if (realVideoName == NULL) {
        goto end;
    }

    formatContext = avformat_alloc_context();
    if ((ret = avformat_open_input(&formatContext, videoFilePath, NULL, NULL)) < 0) {
        printfError(ret, "open_input");
        goto end;
    }
    if ((ret = avformat_find_stream_info(formatContext, NULL)) < 0) {
        printfError(ret, "find_stream");
        goto end;
    }
    int videoIndex = -1;
    for (unsigned int i = 0; i < formatContext->nb_streams; ++i) {
        if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoIndex = (int) i;
            break;
        }
    }
    if (videoIndex < 0) {
        printf("file in not a video file.\n");
        goto end;
    }
    AVStream *stream = formatContext->streams[videoIndex];
    AVCodec *codec = avcodec_find_decoder(stream->codecpar->codec_id);
    if (codec == NULL) {
        printf("codec not found.\n");
        goto end;
    }
    codecContext = avcodec_alloc_context3(codec);
    if (codecContext == NULL) {
        goto end;
    }
    avcodec_parameters_to_context(codecContext, stream->codecpar);
    if ((ret = avcodec_open2(codecContext, codec, NULL)) < 0) {
        printfError(ret, "avcodec_open2");
        goto end;
    }

    frame = av_frame_alloc();
    frameYuv = av_frame_alloc();
    packet = av_packet_alloc();
    swsContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt,
                                codecContext->width, codecContext->height, AV_PIX_FMT_YUV420P,
                                SWS_FAST_BILINEAR, NULL, NULL, NULL);
    pictureBuff = av_malloc(
            av_image_get_buffer_size(AV_PIX_FMT_YUV420P, codecContext->width, codecContext->height, 1));
    if (frame == NULL || frameYuv == NULL || packet == NULL || swsContext == NULL || pictureBuff == NULL) {
        goto end;
    }
    av_image_fill_arrays(frameYuv->data, frameYuv->linesize, pictureBuff, AV_PIX_FMT_YUV420P, codecContext->width,
                         codecContext->height, 1);
    /* Duration in seconds. NOTE(review): assumes time_base.num divides den
     * evenly (typical 1/N stream bases) — confirm for exotic inputs. */
    int videoTime = (int) (stream->duration / (stream->time_base.den / stream->time_base.num));
    double nowTime = 0;
    int allowPictureTime = 0;
    while (nowTime < videoTime) {
        ret = av_read_frame(formatContext, packet);
        if (ret < 0) {
            printfError(ret, "read_frame");
            goto end;  /* fixed: the original returned here and leaked everything */
        }
        if (packet->stream_index != videoIndex) {
            av_packet_unref(packet);  /* fixed: non-video packets were never unref'd */
            continue;
        }
        ret = avcodec_send_packet(codecContext, packet);
        if (ret < 0) {
            printfError(ret, "send packet");
            av_packet_unref(packet);
            continue;
        }
        ret = avcodec_receive_frame(codecContext, frame);
        if (ret < 0) {
            printfError(ret, "receive_frame");  /* includes EAGAIN: decoder needs more input */
            av_packet_unref(packet);
            continue;
        }
        if (nowTime < allowPictureTime) {
            /* Not yet time for the next snapshot; just advance the clock. */
            nowTime = av_q2d(stream->time_base) * packet->pts;
            av_packet_unref(packet);
            continue;
        }
        log_packet(formatContext, packet, "encode");
        ret = sws_scale(swsContext, (const uint8_t *const *) frame->data, frame->linesize, 0, codecContext->height,
                        frameYuv->data, frameYuv->linesize);
        if (ret < 0) {
            printfError(ret, "sws scale");
            av_packet_unref(packet);
            continue;
        }
        int pictureIndex = (allowPictureTime / pictureInterval) + 1;
        /* "<base>_<idx>.jpg" — 32 spare bytes cover '_', any int, ".jpg" and
         * the NUL (the original's "+5" was one byte short). */
        size_t nameLen = strlen(realVideoName) + 32;
        char *pictureName = malloc(nameLen);
        size_t picPathLen = strlen(outputDirPath) + nameLen;
        char *picturePath = malloc(picPathLen);
        if (pictureName == NULL || picturePath == NULL) {
            free(pictureName);
            free(picturePath);
            av_packet_unref(packet);
            goto end;
        }
        snprintf(pictureName, nameLen, "%s_%d.jpg", realVideoName, pictureIndex);
        snprintf(picturePath, picPathLen, "%s%s", outputDirPath, pictureName);
        ret = createPicture(codecContext, picturePath, frameYuv, width, height);
        free(pictureName);  /* fixed: both name buffers leaked on every snapshot */
        free(picturePath);
        if (ret >= 0) {
            allowPictureTime += pictureInterval;
        }
        nowTime = av_q2d(stream->time_base) * packet->pts;
        av_packet_unref(packet);
    }
    rc = 0;

end:
    sws_freeContext(swsContext);
    av_packet_free(&packet);
    av_free(pictureBuff);
    av_frame_free(&frame);
    av_frame_free(&frameYuv);
    avcodec_free_context(&codecContext);  /* also closes; original leaked the context */
    avformat_close_input(&formatContext);
    free(videoFilePath);
    free(cacheVideoName);
    return rc;
}

/**
 * Remux a bounded slice of the opened RTMP input stream into a new
 * container file (video streams only; packets are copied, not re-encoded).
 * @param inputFormatContext opened input (RTMP) context
 * @param outputVideoPath path of the output video file
 * @param videoTime length of video to record, in seconds
 * @return 0 on success, or a module error code on failure
 */
int outputRtmp2Video(AVFormatContext *inputFormatContext, const char *outputVideoPath, const double videoTime) {
    AVOutputFormat *outputFormat = NULL;
    AVFormatContext *outputFormatContext = NULL;
    AVPacket *packet = NULL;
    int ret;
    int rc = 0;
    int streamIndex = 0;
    int *streamMapping = NULL;
    unsigned int streamMappingSize = 0;
    AVCodecContext *codecContext = NULL;
    struct SwsContext *imgConvertContext = NULL;
    int headerWritten = 0;

    ret = avformat_alloc_output_context2(&outputFormatContext, NULL, NULL, outputVideoPath);
    if (!outputFormatContext) {
        printfError(ret, "avformat_alloc_output_context2");
        return OUTPUT_FILE_ERROR;
    }
    streamMappingSize = inputFormatContext->nb_streams;
    streamMapping = av_mallocz_array(streamMappingSize, sizeof(*streamMapping));
    if (!streamMapping) {
        rc = ERROR;
        goto end;
    }
    outputFormat = outputFormatContext->oformat;
    /* Map every input video stream to a compact output index; -1 = dropped. */
    for (unsigned int i = 0; i < inputFormatContext->nb_streams; ++i) {
        AVStream *inStream = inputFormatContext->streams[i];
        AVCodecParameters *inCodecParam = inStream->codecpar;
        if (inCodecParam->codec_type != AVMEDIA_TYPE_VIDEO) {
            streamMapping[i] = -1;
            continue;
        }
        streamMapping[i] = streamIndex++;
        AVCodec *codec = avcodec_find_decoder(inCodecParam->codec_id);
        if (codec == NULL) {
            printf("codec not find,\n");
            rc = FIND_DECODER_ERROR;
            goto end;
        }
        avcodec_free_context(&codecContext);  /* fixed: leak with >1 video stream */
        codecContext = avcodec_alloc_context3(codec);
        avcodec_parameters_to_context(codecContext, inCodecParam);
        if ((ret = avcodec_open2(codecContext, codec, NULL)) < 0) {
            printfError(ret, "avcodec_open2");
            rc = OPEN_DECODER_ERROR;
            goto end;
        }
        /* NOTE(review): the decoder and scaler opened here are never used in
         * the remux loop below — kept for behavior parity; confirm intent. */
        sws_freeContext(imgConvertContext);
        imgConvertContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt,
                                           codecContext->width, codecContext->height, AV_PIX_FMT_YUV420P,
                                           SWS_BICUBIC, NULL, NULL, NULL);
        AVStream *outStream = avformat_new_stream(outputFormatContext, NULL);
        if (!outStream) {
            rc = ERROR;
            goto end;
        }
        ret = avcodec_parameters_copy(outStream->codecpar, inCodecParam);
        if (ret < 0) {
            printfError(ret, "avcodec_parameters_copy");
            rc = ERROR;
            goto end;
        }
        outStream->codecpar->codec_tag = 0;
    }
    av_dump_format(outputFormatContext, 0, outputVideoPath, 1);
    if (!(outputFormat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&outputFormatContext->pb, outputVideoPath, AVIO_FLAG_WRITE);
        if (ret < 0) {
            printfError(ret, "avio_open");
            rc = AVIO_OPEN_ERROR;
            goto end;
        }
    }
    ret = avformat_write_header(outputFormatContext, NULL);
    if (ret < 0) {  /* fixed: the original ignored the header result */
        printfError(ret, "avformat_write_header");
        rc = ERROR;
        goto end;
    }
    headerWritten = 1;

    int64_t prePts = -1;
    double nowTime = 0;
    int sleepTime = 50;
    int errCount = 1;
    /* Fixed: av_malloc(sizeof(AVPacket)) left the packet uninitialized. */
    packet = av_packet_alloc();
    if (!packet) {
        rc = ERROR;
        goto end;
    }
    while (nowTime < videoTime) {
        if (inputFormatContext->pb == NULL || inputFormatContext->pb->error < 0) {
            rc = ERROR;
            goto end;
        }
        ret = av_read_frame(inputFormatContext, packet);
        if (ret < 0) {
            printfError(ret, "av_read_frame");
            if (errCount >= MAX_RETRY_NUM) {
                printf("read time out");
                /* Partial capture still counts as success if anything was written. */
                rc = (nowTime > 0) ? 0 : TIME_OUT_ERROR;
                goto end;
            }
            errCount++;
            av_packet_unref(packet);
            av_usleep(sleepTime);
            continue;
        }
        errCount = 0;
        AVStream *in_stream = inputFormatContext->streams[packet->stream_index];
        /* Drop unmapped streams and non-increasing pts (stale/duplicate packets). */
        if (packet->stream_index >= (int) streamMappingSize ||
            streamMapping[packet->stream_index] < 0 ||
            (prePts > 0 && packet->pts <= prePts)) {
            av_packet_unref(packet);
            av_usleep(sleepTime);
            continue;
        }
        log_packet(inputFormatContext, packet, "in");
        packet->stream_index = streamMapping[packet->stream_index];
        AVStream *out_stream = outputFormatContext->streams[packet->stream_index];
        prePts = packet->pts;
        nowTime = av_q2d(in_stream->time_base) * (double) prePts;
        /* copy packet, rescaling timestamps into the output time base */
        packet->pts = av_rescale_q_rnd(packet->pts, in_stream->time_base, out_stream->time_base,
                                       AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        packet->dts = av_rescale_q_rnd(packet->dts, in_stream->time_base, out_stream->time_base,
                                       AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        packet->duration = av_rescale_q(packet->duration, in_stream->time_base, out_stream->time_base);
        packet->pos = -1;
        log_packet(outputFormatContext, packet, "out");
        ret = av_interleaved_write_frame(outputFormatContext, packet);
        if (ret < 0) {
            printfError(ret, "av_interleaved_write_frame");
            break;  /* original behavior: stop reading but still finalize the file */
        }
        av_packet_unref(packet);
        av_usleep(sleepTime);
    }

end:
    /* Single cleanup path — the original duplicated this in the timeout
     * branch and leaked on pre-loop errors. */
    if (headerWritten) {
        ret = av_write_trailer(outputFormatContext);
        if (ret < 0 && ret != AVERROR_EOF) {
            printfError(ret, "av_write_trailer");
            if (rc == 0) {
                rc = WRITE_TRAILER_ERROR;
            }
        }
    }
    if (outputFormat != NULL && !(outputFormat->flags & AVFMT_NOFILE)) {
        avio_closep(&outputFormatContext->pb);
    }
    av_packet_free(&packet);
    av_freep(&streamMapping);
    avcodec_free_context(&codecContext);  /* fixed: avcodec_close alone leaked it */
    sws_freeContext(imgConvertContext);
    /* Fixed: the original called avformat_close_input() on this OUTPUT context. */
    avformat_free_context(outputFormatContext);
    return rc;
}

/**
 * Connect an RTMP stream, register its decode state in the global pool,
 * and write an initial snapshot picture.
 * @param rtmpUrl stream URL (also used as the pool key)
 * @param picturePath destination path of the first snapshot
 * @param width snapshot width (0 = source width)
 * @param height snapshot height (0 = source height)
 * @return 0 on success, or a module error code on failure
 */
int connectionRtmp(const char *rtmpUrl, const char *picturePath, int width, int height) {
    AVFormatContext *formatContext;
    int videoIndex;
    AVCodecContext *codecContext;
    AVCodec *codec;
    AVStream *videoStream;
    int ret;
    struct SwsContext *imgConvertContext;

    avformat_network_init();
    formatContext = avformat_alloc_context();
    if ((ret = avformat_open_input(&formatContext, rtmpUrl, NULL, NULL)) < 0) {
        printfError(ret, "avformat_open_input");
        return OPEN_INPUT_ERROR;
    }
    if ((ret = avformat_find_stream_info(formatContext, NULL)) < 0) {
        printfError(ret, "avformat_find_stream_info");
        return FIND_STREAM_ERROR;
    }
    videoIndex = -1;
    for (int i = 0; i < formatContext->nb_streams; ++i) {
        if (formatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoIndex = i;
            break;
        }
    }
    if (videoIndex == -1) {
        printf("file is not video.\n");
        return NO_VIDEO_ERROR;
    }
    videoStream = formatContext->streams[videoIndex];
    codec = avcodec_find_decoder(videoStream->codecpar->codec_id);
    if (codec == NULL) {
        printf("codec not found.\n");
        return FIND_DECODER_ERROR;
    }
    codecContext = avcodec_alloc_context3(codec);
    avcodec_parameters_to_context(codecContext, videoStream->codecpar);
    if ((ret = avcodec_open2(codecContext, codec, NULL)) < 0) {
        printfError(ret, "avcodec_open2");
        return OPEN_DECODER_ERROR;
    }
    imgConvertContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt,
                                       codecContext->width, codecContext->height, AV_PIX_FMT_YUV420P,
                                       SWS_BICUBIC, NULL, NULL, NULL);
    if (videoIndex > 0) {
        AVPacket *packet = av_packet_alloc();
        for (int i = 0; i < videoIndex; ++i) {
            av_read_frame(formatContext, packet);
            av_packet_unref(packet);
        }
        av_packet_free(&packet);
    }
    av_dump_format(formatContext, 0, rtmpUrl, 0);
    FFmepgStruct *ffmepgStruct = malloc(sizeof(FFmepgStruct));
    ffmepgStruct->formatContext = formatContext;
    ffmepgStruct->rtmpUrl = rtmpUrl;
    ffmepgStruct->codecContext = codecContext;
    ffmepgStruct->codec = codec;
    ffmepgStruct->videoIndex = videoIndex;
    ffmepgStruct->imgConvertContext = imgConvertContext;
    ffmepgStruct->index = 0;
    int frameRate = videoStream->avg_frame_rate.num / videoStream->avg_frame_rate.den;
    ffmepgStruct->timeFrameCount = ffmepgStruct->time * frameRate;
    if (rtmpPool == NULL) {
        rtmpPool = malloc(sizeof(RtmpPool));
        initRtmpPool(rtmpPool);
    }
    add(rtmpPool, *ffmepgStruct);
    return outputRtmpFramePicture(rtmpUrl, picturePath, width, height);
}

/**
 * Close an RTMP stream previously opened by connectionRtmp and remove its
 * entry from the global pool.
 * @param rtmpUrl pool key of the stream to close
 * @return 0 on success, ERROR if the stream is not in the pool
 */
int closeRtmp(const char *rtmpUrl) {
    FFmepgStruct *ffmepgStruct = get(*rtmpPool, rtmpUrl);
    sws_freeContext(ffmepgStruct->imgConvertContext);
    avformat_close_input(&ffmepgStruct->formatContext);
    avcodec_close(ffmepgStruct->codecContext);
    removeItem(rtmpPool, rtmpUrl);
    return 0;
}

/**
 * Grab the next video frame of a pooled RTMP stream and save it to
 * picturePath as a JPEG.
 * @param rtmpUrl pool key of the stream
 * @param picturePath destination path of the snapshot
 * @param width snapshot width (0 = source width)
 * @param height snapshot height (0 = source height)
 * @return 0 on success, or a module error code on failure
 */
int outputRtmpFramePicture(const char *rtmpUrl, const char *picturePath, int width, int height) {
    FFmepgStruct *ffmepgStruct = get(*rtmpPool, rtmpUrl);
    AVCodecContext *codecContext = ffmepgStruct->codecContext;
    AVFormatContext *formatContext = ffmepgStruct->formatContext;
    AVPacket *packet = av_malloc(sizeof(AVPacket));
    AVFrame *frame = av_frame_alloc();
    AVFrame *frameYUV = av_frame_alloc();
    unsigned char *out_buffer = av_malloc(
            av_image_get_buffer_size(AV_PIX_FMT_YUV420P, codecContext->width, codecContext->height, 1));
    av_image_fill_arrays(frameYUV->data, frameYUV->linesize, out_buffer, AV_PIX_FMT_YUV420P, codecContext->width,
                         codecContext->height, 1);
    avcodec_flush_buffers(codecContext);
    if (ffmepgStruct->index >= 0) {
        for (int i = 0; i < ffmepgStruct->timeFrameCount - 1; ++i) {
            int ret = av_read_frame(formatContext, packet);
            if (ret >= 0) {
                avcodec_send_packet(codecContext, packet);
                avcodec_receive_frame(codecContext, frame);
            }
            av_packet_unref(packet);
        }
    }
    while (1) {
        int ret = av_read_frame(formatContext, packet);
        if (ret >= 0) {
            if (packet->stream_index == ffmepgStruct->videoIndex) {
                ret = avcodec_send_packet(codecContext, packet);
                if (ret < 0) {
                    printfError(ret, "avcodec_send_packet");
                    continue;
                }
                while (ret >= 0) {
                    ret = avcodec_receive_frame(codecContext, frame);
                    if (ret < 0) {
                        printfError(ret, "avcodec_receive_frame");
                        continue;
                    }
                    ret = sws_scale(ffmepgStruct->imgConvertContext, (const uint8_t *const *) frame->data,
                                    frame->linesize,
                                    0, codecContext->height, frameYUV->data, frameYUV->linesize);
                    if (ret <= 0) {
                        printfError(ret, "sws_scale");
                        continue;
                    }
                    ret = createPicture(codecContext, picturePath, frameYUV, width, height);
                    if (ret < 0) {
                        return CREATE_PICTURE_ERROR;
                    }
                    ffmepgStruct->index = frame->pts;
                    break;
                }
                if (ret >= 0) {
                    break;
                }
            }
        } else {
            printfError(ret, "av_read_frame");
            continue;
        }
        av_usleep(30);
    }
    av_packet_unref(packet);
    av_packet_free(&packet);
    av_free(out_buffer);
    av_frame_free(&frameYUV);
    av_frame_free(&frame);
    return 0;
}

/*
 * Record videoTime seconds of the pooled RTMP stream identified by rtmpUrl
 * into outputVideoPath, delegating the remux to outputRtmp2Video().
 */
int outputVideo(const char *rtmpUrl, const char *outputVideoPath, int videoTime) {
    FFmepgStruct *state = get(*rtmpPool, rtmpUrl);
    return outputRtmp2Video(state->formatContext, outputVideoPath, videoTime);
}

int outputVideoAndPicture(const char *rtmpUrl, const char *outputDirPath, const char *videoName, int videoTime,
                          int pictureInterval, int width, int height) {
    AVFormatContext *formatContext = avformat_alloc_context();
    int ret;
    AVDictionary *avdic = NULL;
    char option_key[] = "max_delay";
    char option_value[] = "5000000";
    av_dict_set(&avdic, option_key, option_value, 0);
    if ((ret = avformat_open_input(&formatContext, rtmpUrl, NULL, NULL)) < 0) {
        printfError(ret, "avformat_open_input");
        return OPEN_INPUT_ERROR;
    }
    if ((ret = avformat_find_stream_info(formatContext, NULL)) < 0) {
        printfError(ret, "avformat_find_stream_info");
        return FIND_STREAM_ERROR;
    }
    const char *outputVideoPath;
    if (pictureInterval > 0) {
        char *result;
        result = malloc(sizeof(strlen(outputDirPath) + strlen(videoName)));
        sprintf(result, "%s%s", outputDirPath, videoName);
        outputVideoPath = result;
    } else {
        outputVideoPath = outputDirPath;
    }
    ret = outputRtmp2Video(formatContext, outputVideoPath, videoTime);
    if (ret >= 0) {
        ret = outputRtmp2Picture(outputDirPath, videoName, pictureInterval, width, height);
    }
    avformat_close_input(&formatContext);
    return ret;
}