﻿#include "LanAudioServer.h"

#include "qhttpserver.h"
#include "qhttprequest.h"
#include "qhttpresponse.h"
#include "AudioFile.h"
#include <QtGui>


extern "C" {
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}

// Per-stream encoding state shared between the muxing helpers below.
// One instance exists for the video stream and one for the audio stream;
// both point at the same (non-owned) AVFormatContext.
struct StreamInfo {
    AVFormatContext *formatContext = nullptr;  // shared muxer context (not owned here)

    AVStream *avstream = nullptr;              // stream inside formatContext
    AVCodecContext *avCodecContext = nullptr;  // encoder context (owned, freed in closeStream)
    AVCodec *avCodec = nullptr;                // encoder descriptor
    int64_t next_pts = 0;                      // pts of the next frame, in codec time base
    int samples_count = 0;                     // total audio samples encoded so far
    AVFrame *frame = nullptr;                  // reusable frame (owned, freed in closeStream)
    QByteArray audiopcms16;                    // 16000 Hz, 1 channel mono PCM payload
    int audioIndex = 0;                        // read cursor into audiopcms16 (bytes)
    int audioLength = 0;                       // bytes remaining in audiopcms16
    QImage videoimage;                         // still image rendered into every video frame

    float duration = 0.0f;                     // target clip duration in milliseconds
};

// Send one frame to the encoder (nullptr flushes it) and mux every packet it
// produces. Returns 1 once the encoder signals AVERROR_EOF (fully flushed),
// 0 otherwise. Fatal errors abort via exit(__LINE__), matching file style.
static int writeFrame(AVFormatContext *fmt_ctx, AVCodecContext *c, AVStream *st, AVFrame *frame)
{
    int ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        exit(__LINE__);
    }

    // av_packet_alloc() already returns a default-initialized packet; the
    // previous extra av_init_packet() call was redundant and is deprecated.
    AVPacket *pkt = av_packet_alloc();
    if (!pkt) {
        exit(__LINE__);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(c, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
            exit(__LINE__);
        }
        // Convert packet timestamps from codec to stream time base before muxing.
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;

        // av_interleaved_write_frame takes ownership of pkt's payload refs;
        // unref so the packet can be reused on the next iteration.
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
        av_packet_unref(pkt);
        if (ret < 0) {
            exit(__LINE__);
        }
    }

    // av_packet_free() unrefs any remaining payload, then frees the packet.
    av_packet_free(&pkt);

    return ret == AVERROR_EOF ? 1 : 0;
}

// Create a new stream in streamInfo->formatContext for the given codec id and
// allocate+configure its encoder context (not yet opened). Audio is fixed at
// 16 kHz mono FLTP; video at 1280x720 YUV420P, 20 fps, GOP 20.
static void createStream(StreamInfo *streamInfo, enum AVCodecID codec_id)
{
    streamInfo->avCodec = avcodec_find_encoder(codec_id);
    if (!streamInfo->avCodec)
        exit(__LINE__);

    streamInfo->avstream = avformat_new_stream(streamInfo->formatContext, NULL);
    if (!streamInfo->avstream)
        exit(__LINE__);
    streamInfo->avstream->id = streamInfo->formatContext->nb_streams - 1;

    AVCodecContext *ctx = avcodec_alloc_context3(streamInfo->avCodec);
    if (!ctx)
        exit(__LINE__);
    streamInfo->avCodecContext = ctx;

    if (streamInfo->avCodec->type == AVMEDIA_TYPE_AUDIO) {
        // Matches the incoming PCM: 16 kHz mono, encoded as planar float.
        ctx->sample_fmt     = AV_SAMPLE_FMT_FLTP;
        ctx->bit_rate       = 32000;
        ctx->sample_rate    = 16000;
        ctx->channel_layout = AV_CH_LAYOUT_MONO;
        ctx->channels       = 1;
        ctx->frame_size     = 1024;
        streamInfo->avstream->time_base = { 1, ctx->sample_rate };
        streamInfo->avstream->start_time = 0;
    } else if (streamInfo->avCodec->type == AVMEDIA_TYPE_VIDEO) {
        // 20 fps with gop_size == 20 means one keyframe per second.
        ctx->codec_id = codec_id;
        ctx->bit_rate = 4000000;
        ctx->width    = 1280;
        ctx->height   = 720;
        streamInfo->avstream->time_base = { 1, 20 };
        ctx->time_base = streamInfo->avstream->time_base;
        ctx->gop_size  = 20;
        ctx->pix_fmt   = AV_PIX_FMT_YUV420P;
    }

    // Some muxers require codec extradata in the container global header.
    if (streamInfo->formatContext->oformat->flags & AVFMT_GLOBALHEADER)
        ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}


// Allocate an audio AVFrame with the given format/layout/rate; when
// nb_samples is nonzero, also allocate its sample buffer. Aborts on failure.
static AVFrame *allocAudioFrame(enum AVSampleFormat sample_fmt,
                                uint64_t channel_layout,
                                int sample_rate, int nb_samples)
{
    AVFrame *audioFrame = av_frame_alloc();
    if (!audioFrame)
        exit(__LINE__);

    audioFrame->format         = sample_fmt;
    audioFrame->channel_layout = channel_layout;
    audioFrame->sample_rate    = sample_rate;
    audioFrame->nb_samples     = nb_samples;

    // A zero sample count means the caller only wants frame metadata.
    if (nb_samples != 0 && av_frame_get_buffer(audioFrame, 0) < 0)
        exit(__LINE__);

    return audioFrame;
}

// Open the audio encoder configured by createStream(), allocate the reusable
// sample frame sized to the encoder's frame_size, and publish the codec
// parameters on the stream for the muxer.
static void startAudioEncodec(StreamInfo *streamInfo)
{
    AVCodecContext *ctx = streamInfo->avCodecContext;

    if (avcodec_open2(ctx, streamInfo->avCodec, nullptr) < 0)
        exit(__LINE__);

    // The encoder dictates how many samples one frame must carry.
    streamInfo->frame = allocAudioFrame(ctx->sample_fmt, ctx->channel_layout,
                                        ctx->sample_rate, ctx->frame_size);

    if (avcodec_parameters_from_context(streamInfo->avstream->codecpar, ctx) < 0)
        exit(__LINE__);
}

// True while the audio stream's next_pts (codec time base) has not yet passed
// the target duration, which is stored in milliseconds (time base {1, 1000}).
static bool audioDurationLimit(StreamInfo *streamInfo)
{
    return av_compare_ts(streamInfo->next_pts,
                         streamInfo->avCodecContext->time_base,
                         streamInfo->duration,
                         { 1, 1000 }) <= 0;
}

// Encode (up to) one audio frame from the PCM buffer held in streamInfo.
// Returns 1 when the audio stream is finished (duration reached, data
// exhausted, or tail smaller than one frame), 0 while more data remains.
static int writeAudioFrame(StreamInfo *streamInfo)
{
    int ret = av_frame_make_writable(streamInfo->frame);
    if (ret < 0)
        exit(__LINE__);

    // Past the duration limit or out of PCM: flush the encoder and stop.
    if (!audioDurationLimit(streamInfo) || streamInfo->audioLength <= 0) {
        writeFrame(streamInfo->formatContext, streamInfo->avCodecContext, streamInfo->avstream, nullptr);
        return 1;
    }

    uint8_t *outptr = (uint8_t*)streamInfo->audiopcms16.data();

    QByteArray tempData;
    int psfltp = av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP);
    int sampleSize = streamInfo->frame->nb_samples * psfltp;
    if (sampleSize > streamInfo->audioLength) {
        // The remaining tail is smaller than one full encoder frame: it is
        // dropped and the encoder flushed. (The commented-out code below was
        // an earlier zero-padding approach for that tail.)
        streamInfo->audioLength = 0;
        writeFrame(streamInfo->formatContext, streamInfo->avCodecContext, streamInfo->avstream, nullptr);
        return 1;
//        int offset = sampleSize - streamInfo->audioLength;
//        tempData = QByteArray((char*)outptr, streamInfo->audioLength);
//        tempData.append(QByteArray(offset, 0));

//        outptr = (uint8_t*)tempData.data();
//        streamInfo->audioIndex = 0;
//        streamInfo->audioLength = sampleSize;
    }

    // Point the frame's single (mono) plane at the next chunk of FLTP bytes;
    // avcodec_fill_audio_frame sets up data pointers without copying and, on
    // success, returns the buffer size consumed in bytes.
    int ret1 = avcodec_fill_audio_frame(streamInfo->frame, 1, AV_SAMPLE_FMT_FLTP, (uint8_t*)(outptr + streamInfo->audioIndex), streamInfo->audioLength, 0);
    if (ret1 < 0) {
        exit(__LINE__);
    }
    int dst_nb_samples = ret1 / psfltp;

    // Advance the read cursor by the bytes handed to the encoder.
    streamInfo->audioIndex += ret1;
    streamInfo->audioLength -= ret1;

    streamInfo->next_pts += dst_nb_samples;

    // pts is derived from the running sample count, rescaled from
    // {1, sample_rate} into the codec's time base.
    streamInfo->frame->pts = av_rescale_q(streamInfo->samples_count, {1, streamInfo->avCodecContext->sample_rate}, streamInfo->avCodecContext->time_base);
    streamInfo->samples_count += dst_nb_samples;
    return writeFrame(streamInfo->formatContext, streamInfo->avCodecContext, streamInfo->avstream, streamInfo->frame);
}

// Allocate a video AVFrame of the given pixel format and size, including its
// plane buffers. Returns NULL if the frame itself cannot be allocated;
// aborts if the buffers cannot. (Name keeps the original's "Vidoe" typo so
// callers stay unchanged.)
static AVFrame *allocVidoeFrame(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *vframe = av_frame_alloc();
    if (!vframe)
        return NULL;

    vframe->format = pix_fmt;
    vframe->width  = width;
    vframe->height = height;

    // Default alignment (0) lets FFmpeg pick a suitable line stride.
    if (av_frame_get_buffer(vframe, 0) < 0)
        exit(__LINE__);

    return vframe;
}
// Open the video encoder configured by createStream(), allocate the reusable
// picture frame, and publish the codec parameters on the stream.
static void startVideoEncodec(StreamInfo *streamInfo)
{
    AVCodecContext *ctx = streamInfo->avCodecContext;

    if (avcodec_open2(ctx, streamInfo->avCodec, nullptr) < 0)
        exit(__LINE__);

    streamInfo->frame = allocVidoeFrame(ctx->pix_fmt, ctx->width, ctx->height);
    if (!streamInfo->frame)
        exit(__LINE__);

    if (avcodec_parameters_from_context(streamInfo->avstream->codecpar, ctx) < 0)
        exit(__LINE__);
}

// Convert streamInfo->videoimage (RGB) into the YUV420P frame and stamp its
// pts. Returns nullptr once next_pts passes the clip duration, which makes
// writeFrame() flush the encoder.
static AVFrame *fillVideoFrame(StreamInfo *streamInfo)
{
    // Duration is stored in milliseconds, hence the {1, 1000} time base.
    auto ret = av_compare_ts(streamInfo->next_pts, streamInfo->avCodecContext->time_base, streamInfo->duration, { 1, 1000 });
    if (ret > 0) return nullptr;

    if (av_frame_make_writable(streamInfo->frame) < 0)
        exit(__LINE__);

    const QImage &image = streamInfo->videoimage;
    AVFrame *frame = streamInfo->frame;
    // NOTE(review): assumes image dimensions equal the encoder frame size
    // (1280x720 in this file); a smaller image would leave stale plane data.

    // Luma (Y) plane at full resolution, BT.601 limited-range coefficients.
    for (int y = 0; y < image.height(); y++) {
        for (int x = 0; x < image.width(); x++) {
            QRgb rgb = image.pixel(x, y);
            float r = qRed(rgb);
            float g = qGreen(rgb);
            float b = qBlue(rgb);
            frame->data[0][y * frame->linesize[0] + x] = (uint8_t)(0.256789 * r + 0.504129 * g + 0.097906 * b + 16.0);
        }
    }

    // Chroma (U/V) planes: 4:2:0 halves resolution in both axes, so each
    // chroma sample must come from source pixel (2x, 2y). The previous code
    // read pixel(x, y), sampling only the top-left quadrant of the image and
    // producing wrong colors over the rest of the frame.
    for (int y = 0; y < image.height() / 2; y++) {
        for (int x = 0; x < image.width() / 2; x++) {
            QRgb rgb = image.pixel(2 * x, 2 * y);
            float r = qRed(rgb);
            float g = qGreen(rgb);
            float b = qBlue(rgb);
            frame->data[1][y * frame->linesize[1] + x] = (uint8_t)(-0.148223 * r - 0.290992 * g + 0.439215 * b + 128.0);
            frame->data[2][y * frame->linesize[2] + x] = (uint8_t)(0.439215 * r - 0.367789 * g - 0.071426 * b + 128.0);
        }
    }

    frame->pts = streamInfo->next_pts++;
    return frame;
}

// Encode the next video frame; fillVideoFrame() yields nullptr past the
// duration limit, which makes writeFrame() flush and return 1 (finished).
static int writeVideoFrame(StreamInfo *streamInfo)
{
    AVFrame *pending = fillVideoFrame(streamInfo);
    return writeFrame(streamInfo->formatContext, streamInfo->avCodecContext,
                      streamInfo->avstream, pending);
}

// Release the per-stream resources owned by StreamInfo: the reusable frame
// and the encoder context. (The AVStream itself is freed later with the
// format context.)
static void closeStream(StreamInfo *streamInfo)
{
    // av_frame_free() already unrefs the frame, and avcodec_free_context()
    // closes the codec; the previous explicit av_frame_unref()/avcodec_close()
    // calls were redundant (avcodec_close is deprecated in current FFmpeg).
    av_frame_free(&streamInfo->frame);
    avcodec_free_context(&streamInfo->avCodecContext);
}

// Convert interleaved 16 kHz mono S16 PCM into planar float (FLTP) at the
// same rate/layout, as required by the AAC encoder. Returns an empty array
// if the resampler cannot be created or initialized.
static QByteArray convertToFltp(const QByteArray &dat) {

    const char *dataIn = dat.data();
    int countIn = dat.size();

    SwrContext *swr = swr_alloc();
    if (!swr)
        return QByteArray();

    av_opt_set_int(swr, "in_channel_layout",  AV_CH_LAYOUT_MONO, 0);
    av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_MONO,  0);
    av_opt_set_int(swr, "in_sample_rate",     16000, 0);
    av_opt_set_int(swr, "out_sample_rate",    16000, 0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",  AV_SAMPLE_FMT_S16, 0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_FLTP,  0);

    // Fixed: swr_init() failure was previously ignored, letting a broken
    // context be used for conversion.
    if (swr_init(swr) < 0) {
        swr_free(&swr);
        return QByteArray();
    }

    int pss16 = av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
    int sampleCount = countIn / pss16;
    int psfltp = av_get_bytes_per_sample(AV_SAMPLE_FMT_FLTP);

    const uint8_t* inptr = (uint8_t*)dataIn;

    // Same rate in/out, so the output holds exactly sampleCount samples.
    QByteArray dataOut(psfltp * sampleCount, 0);

    uint8_t *outptr = (uint8_t*)dataOut.data();
    int converted = swr_convert(swr, (uint8_t**)(&outptr), sampleCount, &inptr, sampleCount);
    swr_free(&swr); // also closes the context

    if (converted < 0)
        return QByteArray();

    return dataOut;
}

// Encode one MPEG-TS segment containing the given 16 kHz mono S16 PCM as AAC
// audio plus the still image as H.264 video. tsOffsetSeconds shifts the
// muxer's output timestamps so consecutive segments form a continuous HLS
// timeline. Returns (segment bytes, duration in seconds); an empty pair on
// failure. Fixed: the two early-return error paths previously leaked the
// format context, the codec contexts/frames, and the dynamic I/O buffer.
static QPair<QByteArray, float> GenerateMpegts(const QByteArray &pcm16, const QImage &image, float tsOffsetSeconds = 0.0f) {

    QByteArray audio = convertToFltp(pcm16);

    StreamInfo video_st, audio_st;

    video_st.videoimage = image;
    audio_st.audiopcms16 = audio;
    audio_st.audioLength = audio.length();

    // Duration in ms: 16 kHz mono s16 => 2 bytes per sample.
    audio_st.duration = (float)pcm16.length() / 16000.0f / 2.0f * 1000.0f;
    video_st.duration = audio_st.duration;

    AVFormatContext *formatContext = nullptr;
    avformat_alloc_output_context2(&formatContext, NULL, "mpegts", nullptr);
    if (!formatContext) return QPair<QByteArray, float>();

    video_st.formatContext = formatContext;
    audio_st.formatContext = formatContext;
    // Offset is in microseconds. NOTE(review): the extra 16000*4 us (64 ms)
    // looks like an empirical fudge -- confirm against player behavior.
    formatContext->output_ts_offset = tsOffsetSeconds * 1000000.0 + (16000.0 * 4.0);

    createStream(&video_st, AV_CODEC_ID_H264);
    createStream(&audio_st, AV_CODEC_ID_AAC);

    startVideoEncodec(&video_st);
    startAudioEncodec(&audio_st);

    av_dump_format(formatContext, 0, 0, 1);

    int ret = avio_open_dyn_buf(&formatContext->pb);
    if (ret < 0) {
        closeStream(&video_st);
        closeStream(&audio_st);
        avformat_free_context(formatContext);
        return QPair<QByteArray, float>();
    }

    ret = avformat_write_header(formatContext, nullptr);
    if (ret < 0) {
        uint8_t *discard = nullptr;
        avio_close_dyn_buf(formatContext->pb, &discard);
        av_free(discard);
        closeStream(&video_st);
        closeStream(&audio_st);
        avformat_free_context(formatContext);
        return QPair<QByteArray, float>();
    }

    // Interleave: always feed the stream whose next pts is earliest, so the
    // muxer receives packets in rough presentation order.
    int encode_video = 1;
    int encode_audio = 1;
    while (encode_video || encode_audio) {
        if (encode_video && (!encode_audio || av_compare_ts(video_st.next_pts, video_st.avCodecContext->time_base, audio_st.next_pts, audio_st.avCodecContext->time_base) <= 0)) {
            encode_video = !writeVideoFrame(&video_st);
        } else {
            encode_audio = !writeAudioFrame(&audio_st);
        }
    }

    av_write_trailer(formatContext);

    closeStream(&video_st);
    closeStream(&audio_st);

    uint8_t *ptrout = nullptr;
    int size = avio_close_dyn_buf(formatContext->pb, &ptrout);
    QByteArray byteRet((char*)ptrout, size);

    av_free(ptrout);
    avformat_free_context(formatContext);

    return QPair<QByteArray, float>(byteRet, audio_st.duration / 1000.0f);
}


// Constructs the server, starts its own QThread, and schedules onInit() to
// run on that thread via a queued single-shot timer.
LanAudioServer::LanAudioServer(TextSpeak *speak) : speak_(speak), lastNeedWaiting_(0), converter_(this)
{
    signIndex_ = 1;      // m3u8 media-sequence counter
    isPlaying_ = false;

    // Silence FFmpeg logging below error level.
    av_log_set_level(AV_LOG_ERROR);
    // NOTE(review): moveToThread(this) (object living on the thread it
    // manages) is a known Qt anti-pattern; it works for queued slots here,
    // but a separate worker object would be cleaner.
    this->start();
    this->moveToThread(this);
    QTimer::singleShot(0, this, SLOT(onInit()));
}

// Schedules onRelease() on the server thread (which stops its event loop)
// and blocks until that thread has fully exited.
LanAudioServer::~LanAudioServer()
{
    qDebug() << __FUNCTION__;
    QTimer::singleShot(0, this, SLOT(onRelease()));
    this->wait();
}

void LanAudioServer::pushMpegTs(const QByteArray &mdat, float duration)
{
    static int index = 1;

    QMutexLocker locker(&mutex_);
    MpegtsInfo info;
    info.bytes = mdat;
    info.duration = duration;
    info.fileName = QString::number(index++) + ".ts";
    bytesTsList_.append(info);

    qDebug() << "push ts server list:" << info.fileName << " count:" << bytesTsList_.count();

    if (index > 0xFFFFF) {
        index = 1;
    }
}



// Hands incoming audio bytes plus their caption text to the converter
// thread, which turns them into MPEG-TS segments (see Converter::run).
void LanAudioServer::onResponseBytes(const QByteArray &data, const QString &text)
{
    converter_.push(data, text);
}

// Runs on the server thread (queued from the constructor): starts the
// converter thread and the HTTP server on port 8080.
void LanAudioServer::onInit()
{
    qDebug() << __FUNCTION__;
    converter_.start();

    httpserver_ = QSharedPointer<QHttpServer>(new QHttpServer());
    // NOTE(review): a failed listen only logs; the connect below still runs.
    if (!httpserver_->listen(QHostAddress::Any, 8080)) {
        qCritical() << "listen failed.";
    }
    // Queued so onRequest executes on this object's (server) thread.
    connect(httpserver_.data(), SIGNAL(newRequest(QHttpServerRequest*,QHttpResponse*)),
            this, SLOT(onRequest(QHttpServerRequest*,QHttpResponse*)), Qt::QueuedConnection);
    // NOTE(review): AudioFile("temp.mp3") is presumably an audio dump/cache
    // file -- confirm its role against the AudioFile class.
    audioFile_ = QSharedPointer<AudioFile>(new AudioFile("temp.mp3"));
}

// Runs on the server thread: tears down the HTTP server and stops this
// thread's event loop (unblocking the destructor's wait()).
void LanAudioServer::onRelease()
{
    qDebug() << __FUNCTION__;

    httpserver_.clear();
    this->exit();
}

// HTTP request dispatcher implementing a minimal HLS endpoint:
//   /index.m3u8 - playlist of queued .ts segments
//   /N.ts       - a queued segment by name
//   / or index.html - embedded player page
//   /pause, /play   - playback control
// Runs on the server thread (queued connection from QHttpServer).
void LanAudioServer::onRequest(QHttpServerRequest *request, QHttpResponse *response)
{
    const int minListCount = 5;
    QUrl url = request->url();
    QString urlStr = url.toString();

    qDebug() << "start request:" << urlStr;

    if (urlStr == "/index.m3u8") {

        mutex_.lock();
        bool isEmpty = bytesTsList_.isEmpty();
        mutex_.unlock();

        // First-ever playlist request with no segments: kick off generation
        // and poll (up to ~50 s) until a few segments are queued.
        if (isEmpty && signIndex_ == 1) {
            timeLast_ = QDateTime::currentDateTime();
            emit doStartRequest();
            for (int i = 0; i < 100; ++i) {
                this->msleep(500);
                {
                    QMutexLocker locker(&mutex_);
                    if (bytesTsList_.count() >= minListCount)
                        break;
                }
            }
        }

        if (isPlaying_)
        {
            // Advance playback time; segments whose duration has elapsed are
            // dropped from the front of the ts queue.
            QMutexLocker locker(&mutex_);
            qint64 msecTakes = timeLast_.msecsTo(QDateTime::currentDateTime());

            // NOTE: every path breaks, so at most one segment is popped per
            // request despite the while loop.
            while(!bytesTsList_.isEmpty()) {
                MpegtsInfo &info = *(bytesTsList_.begin());
                int curMsec = info.duration * 1000.0f;
                // Has the head segment's play time elapsed?
                if (msecTakes >= curMsec) {
                    // Keep a reserve of segments in the queue (original
                    // comment said "three"; the code keeps minListCount).
                    if (bytesTsList_.count() > minListCount) {
                        qDebug() << "take:" << info.fileName;
                        bytesTsList_.pop_front();
                        timeLast_ = timeLast_.addMSecs(curMsec);
                        break;
                    }
                    // Elapsed, but the queue is at or below the reserve: keep it.
                    break;
                }
                break;
            }

            // Queue running low: ask the producer for more data.
            if (bytesTsList_.count() <= minListCount) {
                emit doResponed();
            }
        }

        // Playlist header; %1 is replaced with the media-sequence number below.
        static const QByteArray bytesM3u8 = "#EXTM3U\n"
                                            "#EXT-X-VERSION:3\n"
                                            "#EXT-X-MEDIA-SEQUENCE:%1\n"
                                            "#EXT-X-ALLOW-CACHE:YES\n"
                                            "#EXT-X-TARGETDURATION:2\n\n";


        QByteArray byteCopy = bytesM3u8;

        QMutexLocker locker(&mutex_);
        // List at most minListCount segments, oldest first.
        for (int i = 0; i < bytesTsList_.count() && i < minListCount; ++i) {
            const MpegtsInfo &info = bytesTsList_.at(i);
            byteCopy.append("#EXTINF:" + QString::number(info.duration, 'f', 2) + ",\n");
            byteCopy.append(info.fileName + "\n");
        }

        // Bump the media-sequence number only when the playlist content
        // actually changed, so players notice the update.
        if (byteCopyLast_ != byteCopy) {
            signIndex_++;
            byteCopyLast_ = byteCopy;
        }

        byteCopy.replace("%1", QByteArray::number(signIndex_));
        qDebug() << "response:" << urlStr;
        //response->setHeader("content-type", "audio/x-mpegurl");
        response->setHeader("keep-alive", "timeout=3,max=50");
        response->setHeader("connection", "keep-alive");
        response->writeHead(200);
        response->write(byteCopy);
        response->end();
        qDebug() << byteCopy;

    } else if (urlStr.indexOf(".ts") > 0) {
        QMutexLocker locker(&mutex_);
        urlStr = urlStr.mid(1); // strip leading '/' to match stored file names
        qDebug() << "request ts:" << urlStr;

        // Serve the requested segment from the queue, if still present.
        for (int i = 0; i < bytesTsList_.count(); ++i) {
            const MpegtsInfo &info = bytesTsList_.at(i);
            if (info.fileName == urlStr) {
                qDebug() << "response:" << urlStr;
                //response->setHeader("content-type", "video/mp2t");
                response->setHeader("keep-alive", "timeout=3,max=50");
                response->setHeader("connection", "keep-alive");

                response->writeHead(200);
                response->write(info.bytes);
                response->end();
                response->flush();
                return;
            }
        }

        // Segment already dropped: answer 200 with an empty body.
        qDebug() << "response:" << urlStr;

        response->setHeader("content-type", "video/mp2t");
        response->setHeader("keep-alive", "timeout=3,max=50");
        response->setHeader("connection", "keep-alive");
        response->writeHead(200);
        response->end();

    } else if (urlStr == "/" || urlStr == "index.html") {
        // Serve the embedded player page from Qt resources.
        QFile f(":/images/video.html");
        f.open(QIODevice::ReadOnly);
        QByteArray html = f.readAll();
        f.close();
        qDebug() << "response:" << urlStr;
        response->writeHead(200);
        response->write(html);
        response->end();
    } else if (urlStr == "/pause" && isPlaying_) {
        isPlaying_ = false;
        qDebug() << "pause...";
        response->writeHead(200);
        response->end();
    } else if (urlStr == "/play" && !isPlaying_) {
        isPlaying_ = true;
        qDebug() << "play...";
        qDebug() << "response:" << urlStr;
        response->writeHead(200);
        response->end();

        // Bump the sequence index; this makes players re-fetch and replay
        // the previously listed segments.
        signIndex_++;
        timeLast_ = QDateTime::currentDateTime();
    }
    else {
        // Unknown path: reply 200 with an empty body.
        //response->setHeader("location", "/index.html");
        qDebug() << "unknow reuqest:" << url;
        response->writeHead(200);
        response->end();
        return;
    }
}

// Currently a no-op; no completion handling is performed.
// NOTE(review): appears unused in this file -- confirm whether any signal
// connects to it before removing.
void LanAudioServer::onHttpDone()
{

}


// Converter thread loop: drains queued (PCM bytes, caption text) pairs,
// renders the caption onto a 1280x720 image, encodes image+audio into an
// MPEG-TS segment, and pushes it to the server's segment queue. Exits when
// needBreak_ becomes true (checked once per iteration).
void Converter::run()
{
    do
    {
        bool isEmpty = false;
        {
            QMutexLocker locker(&mutex_);
            isEmpty = caches_.isEmpty();
        }

        // Nothing queued: sleep briefly and poll again.
        if (isEmpty) {
            this->msleep(100);
            continue;
        }

        QPair<QByteArray, QString> pair;
        {
            QMutexLocker locker(&mutex_);
            pair = caches_.takeFirst();
        }

        QPair<QByteArray, float> mpegts;
        {
            qDebug() << "start gen ts file:" << pair.second;
            // Render the caption text centered on a light background at the
            // encoder's frame size (1280x720, see createStream).
            QImage image(1280, 720, QImage::Format_RGB32);
            image.fill(QColor("#F9F9FA"));
            QPainter painter;
            painter.begin(&image);
            QFont font(QString::fromWCharArray(L"微软雅黑"));
            font.setPixelSize(50);
            painter.setFont(font);
            QPen pen(Qt::black);
            painter.setPen(pen);

            QTextOption option;
            option.setAlignment(Qt::AlignCenter);
            painter.drawText(QRect(0, 0, 1280, 720), pair.second, option);
            painter.end();

            // totalTimestamp_ keeps consecutive segments on one continuous
            // HLS timeline (passed through as the muxer ts offset).
            mpegts = GenerateMpegts(pair.first, image, totalTimestamp_);

            qDebug() << "gen ts file finished:" << pair.second;
            server_->pushMpegTs(mpegts.first, mpegts.second);

            totalTimestamp_ += mpegts.second;
        }

    } while(!needBreak_);
}
