#include "core.h"
#include <QPoint>
#include <QScreen>
#include <QFileInfo>
#include <QDateTime>
#include <QApplication>
#include <QDebug>
#include <QThread>
#include <QProcess>
#include <QFile>
#include <QDir>

#include <mutex>
#include <condition_variable>
#include <global/global.h>

#include "devices.h"
#include "qstandardpaths.h"

using namespace Global;

// File-scope synchronization primitives. Not referenced in this
// translation unit; presumably used via extern elsewhere — TODO confirm.
std::mutex mu;
std::condition_variable cond;

/// Meyers-singleton accessor: returns the process-wide Core instance,
/// constructed on first use.
Core *Core::getInstance()
{
    static Core instance;
    return &instance;
}

/// Build the "ximagesrc" source fragment for the selected capture region.
/// Side effect: refreshes m_showPointer from the global cursor flag.
/// (The window-id / xid variant is disabled; capture is always expressed
/// through start/end coordinates scaled by the caller.)
QString Core::getXimagesrc()
{
    m_showPointer = Global::isShowCursor ? "true" : "false";

    QStringList parts;
    parts << "ximagesrc"
          << "display-name=" + qgetenv( "DISPLAY" )
          << "use-damage=false"
          << "show-pointer=" + m_showPointer
          << "startx=" + QString::number( m_x )
          << "starty=" + QString::number( m_y )
          << "endx="   + QString::number( m_right )
          << "endy="   + QString::number( m_bottom );

    return parts.join( " " );
}

/// Caps filter pinning the capture framerate, e.g. "video/x-raw, framerate=30/1".
QString Core::getCapsFilter()
{
    return QString( "video/x-raw, framerate=%1/1" ).arg( m_fps );
}

/// Build a "videoscale" fragment forcing the output to an even
/// width/height (most H.264 encoders reject odd dimensions).
/// Window capture rounds the dimensions DOWN to the nearest even value;
/// full/region capture rounds them UP — preserving the original policy.
QString Core::getScale()
{
    const int modulo = 2;
    int width  = m_width;
    int height = m_height;

    if ( m_isWindow ) {
        // Round down to even.
        width  -= width  % modulo;
        height -= height % modulo;
    } else {
        // Round up to even.
        if ( width  % modulo ) { width  += modulo - ( width  % modulo ); }
        if ( height % modulo ) { height += modulo - ( height % modulo ); }
    }

    return "videoscale ! video/x-raw, width=" + QString::number( width )
           + ", height=" + QString::number( height ) + " !";
}

/// Build the video-encoder fragment of the pipeline for m_encoder.
/// x264enc:     prepends the scaler and appends an H.264 profile caps filter.
/// openh264enc: camera usage-type ("screen" stutters on fast image
///              sequences), optional quoted profile, then h264parse.
/// vp8enc:      bitrate converted from kbps to bps as the element expects.
/// gifenc:      fixed speed, no looping.
QString Core::get_Videocodec_Encoder()
{
    QString value;
    if ( m_encoder == "x264enc" ) {
        QStringList list;
        list << getScale();
        list << "x264enc";
        list << "bitrate=" + QString::number(m_bitrate);
        list << "qp-min=" + QString::number( m_qpmin );
        list << "qp-max=" + QString::number( m_qpmax );
        list << "speed-preset=" + m_speedpreset;
        list << "threads=" + QString::number( QThread::idealThreadCount() );
        list.removeAll( "" );
        value = list.join( " " );
        value.append( " ! video/x-h264, profile=" + m_profile );
    }
    else if ( m_encoder == "openh264enc" ) {
        QStringList list;
        list << "openh264enc";
        list << "bitrate=" + QString::number(m_bitrate);
        list << "qp-min=" + QString::number( m_qpmin );
        list << "qp-max=" + QString::number( m_qpmax );
        list << "usage-type=camera"; // "screen" jerks with fast image sequences
        list << "complexity=low";
        list << "multi-thread=" + QString::number( QThread::idealThreadCount() );
        list << "slice-mode=auto"; // number of slices equal to number of threads
        value = list.join( " " );
        if ( m_openh264ProfileTest ) {
            // BUGFIX: build the quoted profile locally. The previous code
            // called m_profile.append("\"") which permanently mutated the
            // member, adding one extra quote on every subsequent call.
            value.append( " ! video/x-h264, profile=\"" + m_profile + "\"" );
        }
        value.append( " ! h264parse" );
    }
    else if ( m_encoder == "vp8enc" ) {
        QStringList list;
        list << "vp8enc";
        list << "target-bitrate=" + QString::number(m_bitrate * 1000); // vp8enc expects bps, m_bitrate is kbps
        list << "min_quantizer=" + QString::number( m_qpmin );
        list << "max_quantizer=" + QString::number( m_qpmax );
        list << "cpu-used=" + QString::number( QThread::idealThreadCount() );
        list << "deadline=1000000";
        list << "threads=" + QString::number( QThread::idealThreadCount() );
        value = list.join( " " );
    }
    else if ( m_encoder == "gifenc" ) {
        QStringList list;
        list << "gifenc";
        list << "speed=30";
        list << "repeat=0"; // 0 = play once (-1 would loop forever)
        value = list.join( " " );
    }

    return value;
}

bool Core::isAvailable()
{
    bool value = false;

    pa_mainloop *pa_ml;
    pa_mainloop_api *pa_mlapi;
    pa_context *context = NULL;

    // Create a mainloop API and connection to the default server
    pa_ml = pa_mainloop_new();
    pa_mlapi = pa_mainloop_get_api( pa_ml );
    context = pa_context_new( pa_mlapi, NULL );

    // This function connects to the pulse server
    int status = pa_context_connect( context, NULL, PA_CONTEXT_NOAUTOSPAWN, NULL );
    if ( status < 0 ) {
        value = false;
    } else {
        value = true;
    }

    pa_context_unref( context );
    pa_mainloop_free( pa_ml );

    return value;
}

/// Select encoder, muxer, audio codec and file extension for the output
/// format currently stored in m_format.
/// NOTE: GIF intentionally sets no muxer/audio codec (raw gifenc output).
void Core::setEncoderAndAudioCodec()
{
    switch (m_format) {
    case MP4:
        m_encoder     = "x264enc";      // alternative: openh264enc
        m_sMuxer      = "mp4mux";
        m_sAudioCodec = "lamemp3enc";   // alternative: opusenc
        m_sFormat     = "mp4";
        break;
    case MKV:
        m_encoder     = "vp8enc";       // alternatives: openh264enc, x264enc
        m_sMuxer      = "matroskamux";
        m_sAudioCodec = "lamemp3enc";   // alternatives: vorbisenc, flacenc, opusenc
        m_sFormat     = "mkv";
        break;
    case AVI:
        m_encoder     = "vp8enc";       // alternatives: openh264enc, x264enc
        m_sMuxer      = "avimux";
        m_sAudioCodec = "lamemp3enc";
        m_sFormat     = "avi";
        break;
    case MOV:
        m_encoder     = "x264enc";      // alternative: openh264enc
        m_sMuxer      = "qtmux";
        m_sAudioCodec = "lamemp3enc";
        m_sFormat     = "mov";
        break;
    case WEBM:
        m_encoder     = "vp8enc";
        m_sMuxer      = "webmmux";
        m_sAudioCodec = "opusenc";      // alternative: vorbisenc
        m_sFormat     = "webm";
        break;
    case GIF:
        m_encoder = "gifenc";
        m_sFormat = "gif";
        break;
    default:
        break;
    }
}

/// Query all PulseAudio devices via the C helper and split the
/// "---"-separated result into "device:::name" entries.
/// Returns an empty list when the server is unreachable or an empty
/// entry sneaks into the result.
QStringList Core::getAllDevices()
{
    QStringList devices;

    if ( isAvailable() ) {
        const QString raw = QString::fromUtf8( get_all_audio_devices() );
        // Strip the trailing "---" separator before splitting.
        devices = raw.left( raw.length() - 3 ).split( "---" );
    }

    if ( devices.contains( "" ) ) {
        devices.clear();
    }

    return devices;
}

/// Append audio devices whose id contains `soundtype` (e.g. "monitor"
/// for system-sound loopbacks) to `ls`. echo-cancel devices are skipped.
/// When isName is true an empty placeholder is appended instead of the
/// device id (monitor entries carry no display name).
void Core::getDevices(QStringList &ls, QString soundtype, bool isName)
{
    const QStringList all = getAllDevices();
    if ( all.isEmpty() ) {
        return;
    }

    for ( const QString &entry : all ) {
        const QString device = entry.section( ":::", 0, 0 );

        if ( !device.contains( soundtype ) || device.contains( "echo-cancel" ) ) {
            continue;
        }

        if ( isName ) {
            ls << "";
        } else {
            ls << device;
        }
    }
}

/// Append microphone entries (everything that is NOT a `soundtype`
/// loopback) to `ls` — names when isName is true, device ids otherwise.
/// Also records the name→device mapping in mapNameToDevice.
/// Echo-cancel sources are skipped.
void Core::getMicDevices(QStringList &ls, QString soundtype, bool isName)
{
    const QStringList all = getAllDevices();
    if ( all.isEmpty() ) {
        return;
    }

    for ( const QString &entry : all ) {
        const QString name   = entry.section( ":::", 1, 1 );
        const QString device = entry.section( ":::", 0, 0 );

        // Monitors are handled by getDevices(); echo sources are ignored.
        if ( device.contains( soundtype ) || name.contains( "echo" ) ) {
            continue;
        }

        mapNameToDevice.insert( name, device );
        ls << ( isName ? name : device );
    }
}

/// Return the microphone configured in the settings — its display name
/// when isName is true, its PulseAudio device id otherwise.
/// Returns an empty string when the name is unknown.
QString Core::getUsingMicDevices(bool isName)
{
    const QString usingMicName = ConfigHandler().getMicrophone();
    if ( isName ) {
        return usingMicName;
    }
    // value() instead of operator[]: looking up an unknown name must not
    // insert an empty placeholder entry into mapNameToDevice.
    return mapNameToDevice.value( usingMicName );
}

/// Names of the audio sources to record: one empty placeholder per
/// system-sound monitor (when system sound is enabled), followed by the
/// microphone names. Microphones are always listed; disabling the mic is
/// handled separately via muting.
QStringList Core::getSelectedAudioDeviceName()
{
    QStringList names;

    if ( Global::isOpenSysSound ) {
        getDevices( names, "monitor", true );
    }

    getMicDevices( names, "monitor", true );

    return names;
}


/// Device ids of the audio sources to record: all system-sound monitors
/// (when enabled) plus the configured microphone device.
/// When the microphone is globally disabled it stays in the pipeline but
/// is muted at the PulseAudio level (closeMicrophone()).
QStringList Core::getSelectedAudioDevice()
{
    QStringList devices;

    if ( Global::isOpenSysSound ) {
        getDevices( devices, "monitor" );
    }

    const QString usingMicName = ConfigHandler().getMicrophone();
    // value() instead of operator[] so an unknown name does not insert an
    // empty entry into mapNameToDevice as a side effect of this query.
    const QString usingMicDevice = mapNameToDevice.value( usingMicName );
    if ( !usingMicDevice.isEmpty() ) {
        devices << usingMicDevice;
    }

    if ( !Global::isOpenMic ) {
        closeMicrophone();
    }

    return devices;
}

/// Build the muxer fragment ("<muxer> name=mux"). matroskamux also gets
/// a writing-app tag. GIF output has no container, so no muxer is emitted.
QString Core::getMuxer()
{
    // BUGFIX: the old code compared m_sMuxer against "gifenc", but
    // "gifenc" is an encoder name and setEncoderAndAudioCodec() never
    // assigns m_sMuxer for GIF — so the check could never match and a
    // stale muxer from a previous format leaked into the GIF pipeline.
    if ( m_encoder == "gifenc" ) {
        return "";
    }

    if ( m_sMuxer == "matroskamux" ) {
        // matroskamux supports tagging the producing application.
        return m_sMuxer + " name=mux writing-app=" + Global::name + "_"
               + QString( Global::version ).replace( " ", "_" );
    }

    return m_sMuxer + " name=mux";
}

/// Pretty-print a gst-launch pipeline for logging: prefix the launch
/// command (UNIX) and break the chain at every element / mux / mix link
/// so the pipeline is readable line by line.
QString Core::pipeline_structured_output( QString pipeline )
{
    QString nl;
#ifdef Q_OS_UNIX
    nl = "\\";
    pipeline.prepend( "gst-launch-1.0 -e " + nl + "\n    " );
#endif

    // replace() mutates in place; no intermediate copies are needed.
    pipeline.replace( "mux.", "mux. " + nl + "\n   " );
    pipeline.replace( "mix.", "mix. " + nl + "\n   " );
    pipeline.replace( "!", nl + "\n        !" );
    pipeline.append( "\n" );

    return pipeline;
}

void Core::gst_Elements_available()
{
    qDebug().noquote() << Global::nameOutput << "Symbols: + available, - not available";
    QStringList list;
#ifdef Q_OS_WIN
    list << "d3d11screencapturesrc";
    list << "wasapi2src";
    list << "bz2dec";
#endif
#ifdef Q_OS_UNIX
    list << "ximagesrc";
    list << "pulsesrc";
#endif
    list << "queue";
    list << "appsrc";
    list << "capsfilter";
    list << "videoconvert";
    list << "videorate";
    list << "videoscale";
    list << "h264parse";
    list << "audioconvert";
    list << "audiorate";
    list << "audiomixer";
    list << "audioresample";
    list << "filesink";

    for ( int i = 0; i < list.count(); i++ ) {
        GstElementFactory *factory = gst_element_factory_find( QString( list.at(i) ).toLatin1() );
        if ( !factory ) {
            qDebug().noquote() << Global::nameOutput << "-" << list.at(i);
        } else {
            qDebug().noquote() << Global::nameOutput << "+" << list.at(i);
            gst_object_unref( factory );
        }
    }
    qDebug();
}

/// Audio capture element name; PulseAudio is the only supported backend.
QString Core::get_AudioSystem()
{
    return QStringLiteral( "pulsesrc" );
}

/// Check that a GStreamer element both exists AND can be instantiated
/// (the plugin may be installed while its underlying codec is missing).
bool Core::checkAudioAvailable(QString encoder)
{
    GstElementFactory *factory = gst_element_factory_find( encoder.toLatin1() );
    if ( !factory ) {
        qDebug().noquote() << Global::nameOutput << "-" << encoder;
        return false;
    }

    bool isAvailable = true;
    QString message = Global::nameOutput + " + " + encoder;

    GstElement *source = gst_element_factory_create( factory, "source" );
    if ( source ) {
        gst_object_unref( source );
    } else {
        isAvailable = false;
        message = Global::nameOutput + " - " + encoder + " available but codec is missing";
    }

    qDebug().noquote() << message;
    gst_object_unref( factory );

    return isAvailable;
}

/// Assemble and start the GStreamer recording pipeline.
/// @param id    X window id; > 0 switches to window-capture mode.
/// @param rect  capture rectangle in logical coordinates (multiplied by m_scale).
void Core::startRecord(uint id, const QRect &rect)
{
    setEncoderAndAudioCodec();
    m_x = rect.left() * m_scale;
    m_y = rect.top() * m_scale;
    m_right = rect.right() * m_scale;
    m_bottom = rect.bottom() * m_scale;
    qDebug() << "rect:" << rect;
    qDebug() << "id:" << id;
    m_isWindow = ( id > 0 );
    if ( m_isWindow ) {
        m_xid = QString::number( id );
    }

    // Enumerating PulseAudio devices is expensive and has side effects
    // (mute handling); the old code re-ran it up to five times. Query the
    // device and name lists once up front.
    const QStringList audioDevices = getSelectedAudioDevice();
    const QStringList audioNames   = getSelectedAudioDeviceName();
    const bool haveAudio = !audioDevices.isEmpty() && !m_sAudioCodec.isEmpty();

    QStringList pipelineList;
    pipelineList << getXimagesrc();
    pipelineList << getCapsFilter();
    pipelineList << "videoconvert";
    pipelineList << "videorate";
    pipelineList << "queue max-size-bytes=1073741824 max-size-time=10000000000 max-size-buffers=1000";
    pipelineList << get_Videocodec_Encoder();

    if ( haveAudio ) {
        pipelineList << "queue";
        pipelineList << "mux.";
    }

#ifdef Q_OS_UNIX
    // Exactly one audio source: link it straight into the muxer.
    if ( haveAudio && audioDevices.count() == 1 ) {
        // audioNames.value(0) (not .at()) stays safe if the name list is
        // shorter than the device list.
        pipelineList << get_AudioSystem().append( " device=" ).append( audioDevices.at(0) )
                        .append( " client-name=" ).append( Global::nameOutput + "." + QString( audioNames.value(0) ).replace( " ", "-") );
        pipelineList << "audio/x-raw, channels=2";
        pipelineList << "audioconvert";
        pipelineList << "audiorate";
        pipelineList << "queue max-size-bytes=1000000 max-size-time=10000000000 max-size-buffers=1000";
        pipelineList << m_sAudioCodec;
        pipelineList << "queue";
        pipelineList << "mux.";
    }

    // More than one audio source: mix them before encoding.
    if ( haveAudio && audioDevices.count() > 1 ) {
        for ( int x = 0; x < audioDevices.count(); x++ ) {
            pipelineList << get_AudioSystem().append( " device=" ).append( audioDevices.at(x) )
                            .append( " client-name=" ).append( Global::nameOutput + "." + QString( audioNames.value(x) ).replace( " ", "-") );
            pipelineList << "audioconvert";
            pipelineList << "audioresample";
            pipelineList << "queue";
            pipelineList << "mix.";
        }
        pipelineList << "audiomixer name=mix";
        pipelineList << "audioconvert";
        pipelineList << "audiorate";
        pipelineList << "queue";
        pipelineList << m_sAudioCodec;
        pipelineList << "queue";
        pipelineList << "mux.";
    }
#endif

    pipelineList << getMuxer();
    pipelineList.removeAll( "" );

    const QString newVideoFilename = Global::name + "-" + QDateTime::currentDateTime().toString( "yyyy-MM-dd_hh-mm-ss" ) + "." + m_sFormat;
    pipelineList << "filesink location=\"" + m_sVideoPath + "/" + newVideoFilename + "\"";
    m_completePathRec = m_sVideoPath + "/" + newVideoFilename;

    QString pipeline = pipelineList.join( gstr_Pipe );
    pipeline = pipeline.replace( "mix. !", "mix." );
    pipeline = pipeline.replace( "mux. !", "mux." );

    qDebug();
    qDebug().noquote() << Global::nameOutput << "Start record with:" << pipeline;
    qDebug();
    qDebug().noquote() << pipeline_structured_output( pipeline );

    const QByteArray byteArray = pipeline.toUtf8();
    GError *error = Q_NULLPTR;
    gstPipeline = gst_parse_launch( byteArray.constData(), &error );
    if ( error ) {
        // BUGFIX: log the message and free the GError (was leaked, and the
        // raw pointer was printed instead of its text).
        qDebug().noquote() << Global::nameOutput << "gst_parse_launch error:" << error->message;
        g_clear_error( &error );
    }
    if ( !gstPipeline ) {
        return; // nothing to start; the old code would have crashed below
    }

    // Start playing
    GstStateChangeReturn ret = gst_element_set_state( gstPipeline, GST_STATE_PLAYING );
    if ( ret == GST_STATE_CHANGE_FAILURE )   { qDebug().noquote() << Global::nameOutput << "Start was clicked" << "GST_STATE_CHANGE_FAILURE" << "Returncode =" << ret;   } // 0
    if ( ret == GST_STATE_CHANGE_SUCCESS )   { qDebug().noquote() << Global::nameOutput << "Start was clicked" << "GST_STATE_CHANGE_SUCCESS" << "Returncode =" << ret;   } // 1
    if ( ret == GST_STATE_CHANGE_ASYNC )     { qDebug().noquote() << Global::nameOutput << "Start was clicked" << "GST_STATE_CHANGE_ASYNC"   << "Returncode =" << ret;   } // 2
    if ( ret == GST_STATE_CHANGE_NO_PREROLL ){ qDebug().noquote() << Global::nameOutput << "Start was clicked" << "GST_STATE_CHANGE_NO_PREROLL" << "Returncode =" << ret; }// 3
    if ( ret == GST_STATE_CHANGE_FAILURE )
    {
        qDebug().noquote() << Global::name << "Unable to set the pipeline to the playing state.";
        gst_object_unref( gstPipeline );
        gstPipeline = Q_NULLPTR; // avoid a dangling pointer on the next stop/pause
        return;
    }
}

/// Stop recording: send EOS, wait (max 5 s) for the file to be finalized,
/// tear the pipeline down, and report the recorded file path via
/// `completePath`. Finally re-enables (unmutes) the microphone.
void Core::stopRecord(QString &completePath)
{
    qDebug()<<"Core - 停止录制并保存";

    if ( gstPipeline ) {
        // Ask the pipeline to finalize the container before shutdown.
        gst_element_send_event( gstPipeline, gst_event_new_eos() );

        GstClockTime timeout = 5 * GST_SECOND;
        GstMessage *msg = gst_bus_timed_pop_filtered( GST_ELEMENT_BUS (gstPipeline), timeout, GST_MESSAGE_EOS );
        if ( msg ) {
            // BUGFIX: popped bus messages must be unreffed (was leaked).
            gst_message_unref( msg );
        }

        // Step down through the states before releasing the pipeline.
        gst_element_set_state( gstPipeline, GST_STATE_PAUSED );
        gst_element_set_state( gstPipeline, GST_STATE_READY );
        gst_element_set_state( gstPipeline, GST_STATE_NULL );
        gst_object_unref( gstPipeline );
        // BUGFIX: reset the pointer so a second stopRecord() (or a later
        // pause/continue) cannot double-unref a dead pipeline.
        gstPipeline = Q_NULLPTR;
        qDebug().noquote() << Global::nameOutput << "Stop record";
    } else {
        m_completePathRec = "";
    }

    completePath = m_completePathRec;
    openMicrophone();
}

void Core::pauseRecord()
{
    qDebug().noquote() << Global::nameOutput << "Pause was clicked";
    GstStateChangeReturn ret = gst_element_set_state(gstPipeline, GST_STATE_PAUSED );
    if ( ret == GST_STATE_CHANGE_FAILURE )   { qDebug().noquote() << Global::nameOutput << "Pause was clicked" << "GST_STATE_CHANGE_FAILURE" << "Returncode =" << ret;   } // 0
    if ( ret == GST_STATE_CHANGE_SUCCESS )   { qDebug().noquote() << Global::nameOutput << "Pause was clicked" << "GST_STATE_CHANGE_SUCCESS" << "Returncode =" << ret;   } // 1
    if ( ret == GST_STATE_CHANGE_ASYNC )     { qDebug().noquote() << Global::nameOutput << "Pause was clicked" << "GST_STATE_CHANGE_ASYNC" << "Returncode =" << ret;   }   // 2
    if ( ret == GST_STATE_CHANGE_NO_PREROLL ){ qDebug().noquote() << Global::nameOutput << "Pause was clicked" << "GST_STATE_CHANGE_NO_PREROLL" << "Returncode =" << ret; }// 3

    /* wait until it's up and running or failed */
    if (gst_element_get_state (gstPipeline, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
        g_error ("Failed to go into PAUSED state");
    }
}

void Core::contineRecord()
{
    GstStateChangeReturn ret = gst_element_set_state( gstPipeline, GST_STATE_PLAYING );
    if ( ret == GST_STATE_CHANGE_FAILURE )   { qDebug().noquote() << Global::nameOutput << "Continue was clicked" << "GST_STATE_CHANGE_FAILURE" << "Returncode =" << ret;   } // 0
    if ( ret == GST_STATE_CHANGE_SUCCESS )   { qDebug().noquote() << Global::nameOutput << "Continue was clicked" << "GST_STATE_CHANGE_SUCCESS" << "Returncode =" << ret;   } // 1
    if ( ret == GST_STATE_CHANGE_ASYNC )     { qDebug().noquote() << Global::nameOutput << "Continue was clicked" << "GST_STATE_CHANGE_ASYNC" << "Returncode =" << ret;   }   // 2
    if ( ret == GST_STATE_CHANGE_NO_PREROLL ){ qDebug().noquote() << Global::nameOutput << "Continue was clicked" << "GST_STATE_CHANGE_NO_PREROLL" << "Returncode =" << ret; }// 3

    /* wait until it's up and running or failed */
    if (gst_element_get_state (gstPipeline, NULL, NULL, -1) == GST_STATE_CHANGE_FAILURE) {
        g_error ("Failed to go into PLAYING state");
    } else {
        qDebug().noquote() << Global::nameOutput << "Continue was clicked";
    }
}

/// Re-enable (unmute) the configured microphone at the PulseAudio level.
/// The recording pipeline keeps the mic source linked the whole time;
/// enable/disable is implemented purely as source-level mute/unmute.
/// The large commented-out block below is an unfinished experiment that
/// instead re-inserted the mic source elements into the live pipeline.
void Core::openMicrophone()
{
    qDebug()<<"core - openMicrophone";

    // false = unmute the source; remember the mic is no longer closed.
    toggle_microphone(false);
    m_closeMicrophone = false;

 /*   if (gstPipeline && !m_micSourceList.empty()) {
        pauseRecord();

        for (GstElement *elem : m_micSourceList) {
            gst_bin_add(GST_BIN(gstPipeline), elem);

            GstElement *mixElement = gst_bin_get_by_name(GST_BIN(gstPipeline), "mix");  //获取mix
            if (mixElement) {
                gst_element_set_state(elem, GST_STATE_READY);
                if (gst_element_set_state(elem, GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) {
                    qDebug() << "Failed to set element to PAUSED state.";
                    return;
                }

                GstPad *sinkPad = gst_element_get_request_pad(mixElement, "sink_%u");
                if (sinkPad) {
                    // 进行链接
                    GstPad *srcPad = gst_element_get_static_pad(elem, "src");
                    if (srcPad) {
                        GstPadLinkReturn ret = gst_pad_link(srcPad, sinkPad);
                        if (GST_PAD_LINK_FAILED(ret)) {
                            g_print("Failed to link pads\n");
                        }
                        qDebug() << "已添加设备：" << gst_element_get_name(elem);\

                        gst_object_unref(srcPad);
                    }
                    gst_object_unref(sinkPad);
                } else {
                    g_print("Failed to get a request pad from mixElement.\n");
                }
                gst_object_unref(mixElement); // 释放 mixElement 引用
            }
            gst_object_unref(elem);
        }

//        gst_object_unref(m_micSourceList[0]);
//        gst_object_unref(m_micSourceList[1]);
        m_micSourceList.clear();

        qDebug()<<"重新查看";
        GstIterator *iter = gst_bin_iterate_elements(GST_BIN(gstPipeline));
        GValue item = G_VALUE_INIT;
        while (gst_iterator_next(iter, &item) == GST_ITERATOR_OK) {
            GstElement * element = GST_ELEMENT(g_value_get_object(&item));
            const gchar *element_name = gst_element_get_name(element);
            qDebug()<<"---------------1.元素名称："<<element_name ;

            if (g_str_has_prefix(element_name, "pulsesrc") || g_str_has_prefix(element_name, "alsasrc")) {
                qDebug()<<"---------------2.音频元素名称："<<element_name ;
                gchar *device_name = NULL;
                g_object_get(element, "device", &device_name, NULL);
                if (!QString(device_name).contains("monitor")) {
                    qDebug() << "---------------3.麦克风设备名称：" << device_name;
                }
                g_free(device_name);
            }
            g_value_unset(&item);
        }
        gst_iterator_free(iter);

        contineRecord();

    }
 */
    qDebug()<<"open 结束";

}

/// Disable the configured microphone by muting its PulseAudio source.
/// The source element stays linked in the recording pipeline; only the
/// audio it delivers is silenced. The commented-out block below is an
/// unfinished experiment that instead unlinked and removed the mic
/// source elements from the live pipeline.
void Core::closeMicrophone()
{
    qDebug()<<"core - closeMicrophone";

    // true = mute the source; remember the mic is closed.
    toggle_microphone(true);
    m_closeMicrophone = true;

/*    if (gstPipeline) {
        //暂停
        pauseRecord();
        //获取麦克的元素
        GstIterator *iter = gst_bin_iterate_elements(GST_BIN(gstPipeline));
        GValue item = G_VALUE_INIT;
        while (gst_iterator_next(iter, &item) == GST_ITERATOR_OK) {
            GstElement * element = GST_ELEMENT(g_value_get_object(&item));
            const gchar *element_name = gst_element_get_name(element);
            if (g_str_has_prefix(element_name, "pulsesrc") || g_str_has_prefix(element_name, "alsasrc")) {
                gchar *device_name = NULL;
                g_object_get(element, "device", &device_name, NULL);
                if (!QString(device_name).contains("monitor")) {
                    qDebug() << "---------------3.麦克风设备名称：" << device_name;
                    m_micSourceList.append(element);
                    break;
                }
                g_free(device_name);
            }

            g_value_unset(&item);
        }
        gst_iterator_free(iter);

        gst_element_set_state(gstPipeline, GST_STATE_READY);
        //移除麦克元素
        for (GstElement *elem : m_micSourceList) {
            GstElement *mixElement = gst_bin_get_by_name(GST_BIN(gstPipeline), "mix");
            if (mixElement) {
                if (gst_element_set_state(elem, GST_STATE_PAUSED) == GST_STATE_CHANGE_FAILURE) {
                    qDebug() << "Failed to set element to PAUSED state.";
                    return;
                }
                GstPad *sinkPad = gst_element_get_request_pad(mixElement, "sink_%u");
                if (sinkPad) {
                    GstPad *srcPad = gst_element_get_static_pad(elem, "src");
                    if (srcPad) {
                        //查看是否链接
                        if (gst_pad_is_linked(srcPad)) {
                            gboolean ret = gst_pad_unlink(srcPad, sinkPad); // 断开链接
                            qDebug()<<"断开链接： ret =  "<< ret;
                        }else {
                            qDebug()<<"查看是否链接： 否 ";
                        }
                        gst_object_unref(srcPad);
                    }
                    gst_object_unref(sinkPad);
                } else {
                    g_print("Failed to get a request pad from mixElement.\n");
                }
                gst_object_unref(mixElement); // 释放 mixElement 引用
            }
            qDebug()<<"---------";
            gst_element_set_state(elem, GST_STATE_NULL);
            gst_bin_remove(GST_BIN(gstPipeline), elem);
            qDebug() << "已移除设备：" << gst_element_get_name(elem);

            gst_object_unref(elem);

        }
        contineRecord();
    }
*/
    qDebug()<<"closeMicrophone 结束";
}

void Core::toggle_microphone(bool mute)
{
    //获取设备
    QString usingMicName = ConfigHandler().getMicrophone();
    QString usingMicDevice = mapNameToDevice[usingMicName];
    if(usingMicDevice.isEmpty()){
        return;
    }
    const char * device = usingMicDevice.toUtf8().constData();

    //初始化连接
    if(!m_mainloop){
        m_mainloop = pa_mainloop_new();
        m_context = pa_context_new(pa_mainloop_get_api(m_mainloop), "Microphone Control");
        pa_context_connect(m_context, nullptr, PA_CONTEXT_NOFLAGS, nullptr);
        // Wait for context to be ready
        while (pa_context_get_state(m_context) != PA_CONTEXT_READY) {
            pa_mainloop_iterate(m_mainloop, 1, nullptr);
        }
    }

    //切换静音状态
    // Mute or unmute the default source
    pa_operation* op = pa_context_set_source_mute_by_name(m_context, device, mute, nullptr, nullptr);
    if (!op) {
        qCritical() << "Failed to create mute operation for device:" << device;
        return;
    }
    while (pa_operation_get_state(op) == PA_OPERATION_RUNNING) {
        pa_mainloop_iterate(m_mainloop, 1, nullptr);
    }

    pa_operation_unref(op);
}

/// Start a small pulsesrc → audioconvert → level → fakesink pipeline that
/// measures the input level of the microphone `name`. Level messages are
/// delivered synchronously to message_handler(), which emits
/// updateVolume() for the UI.
void Core::levelMeterStart( QString name )
{
    m_microphoneName = name;
    // Resolve the display name to a PulseAudio device id.
    QString device;
    if(mapNameToDevice.contains(name)){
        device = mapNameToDevice.value(name);
    }

    qDebug()<<"监测音量的设备: "<< name << device;
    GstElement *audiosrc, *audioconvert, *level, *fakesink;
    GstCaps *caps;
    GstBus *bus;

    caps = gst_caps_from_string( "audio/x-raw,channels=2" );

    pipelineMic = gst_pipeline_new( NULL );
    g_assert (pipelineMic);
    audiosrc = gst_element_factory_make( "pulsesrc", "my_audiosrc" );
    g_assert (audiosrc);
    audioconvert = gst_element_factory_make( "audioconvert", NULL );
    g_assert (audioconvert);
    level = gst_element_factory_make( "level", NULL );
    g_assert (level);
    fakesink = gst_element_factory_make( "fakesink", NULL );
    g_assert (fakesink);

    gst_bin_add_many( GST_BIN( pipelineMic ), audiosrc, audioconvert, level, fakesink, NULL );
    if ( !gst_element_link( audiosrc, audioconvert ) ) {
        g_error( "Failed to link audiosrc and audioconvert" );
    }
    if (!gst_element_link_filtered( audioconvert, level, caps ) ) {
        g_error( "Failed to link audioconvert and level" );
    }
    if ( !gst_element_link( level, fakesink ) ) {
        g_error( "Failed to link level and fakesink" );
    }
    // BUGFIX: gst_caps_from_string() returns a reference we own; the link
    // call takes its own, so ours must be dropped (was leaked).
    gst_caps_unref( caps );

    g_object_set( G_OBJECT( audiosrc ), "device", device.toUtf8().constData(), NULL );

    QString m_name = "[kylinscreencap] " + name;
    g_object_set( G_OBJECT( audiosrc ), "client-name", m_name.toUtf8().constData(), NULL );
    g_object_set( G_OBJECT( level ), "post-messages", TRUE, NULL );
    g_object_set( G_OBJECT( fakesink ), "sync", TRUE, NULL );

    bus = gst_element_get_bus (pipelineMic);
    gst_bus_set_sync_handler( bus, (GstBusSyncHandler)message_handler,NULL, NULL );
    // BUGFIX: gst_element_get_bus() returns a new reference (was leaked).
    gst_object_unref( bus );
    gst_element_set_state( pipelineMic, GST_STATE_PLAYING );
}

/// GStreamer bus sync handler for the level-meter pipeline.
/// Reads the per-channel "peak" values posted by the level element
/// (despite the rms_* variable names), converts dB to a linear 0..1
/// value, applies exponential peak-hold smoothing, and emits
/// updateVolume() with a 0..100 integer for the UI.
gboolean Core::message_handler(GstBus *bus, GstMessage *message, gpointer data)
{
    Q_UNUSED(bus)

    if ( message->type == GST_MESSAGE_ELEMENT ) {
        const GstStructure *s = gst_message_get_structure( message );
        const gchar *name = gst_structure_get_name( s );
        if ( strcmp( name, "level" ) == 0 ) {
            gint channels;
            gdouble rms_dB;
            gdouble rms;
            const GValue *array_val;
            const GValue *value;
            GValueArray *rms_arr;

            // the values are packed into GValueArrays with the value per channel
            array_val = gst_structure_get_value( s, "peak" );
            rms_arr = (GValueArray *) g_value_get_boxed( array_val );

            // we can get the number of channels as the length of any of the value arrays
            channels = rms_arr->n_values;
            // Peak-hold smoothing state, shared across invocations.
            static double smoothed_rms = 0.0;
            for ( gint i = 0; i < channels; ++i ) {
                value = g_value_array_get_nth( rms_arr, i );
                rms_dB = g_value_get_double( value );

                // converting from dB to normal gives us a value between 0.0 and 1.0
                rms = pow( 10, rms_dB / 20 ) ;
                smoothed_rms = std::max(rms, 0.85 * smoothed_rms);
                // Update the UI volume value (scaled to 0..100).
                int int_rms = static_cast<int>(smoothed_rms *100);
                emit getInstance()->updateVolume(int_rms);
            }
        }
    }

    return TRUE;
}

/// Point the running level-meter pipeline at a different capture device:
/// stop the pipeline, swap the pulsesrc "device" property, re-install the
/// level message handler, and restart. No-op when the meter pipeline
/// hasn't been created yet.
void Core::ModifylevelMeterDevice(QString newDevice)
{
    if(pipelineMic){
        GstElement *audiosrc = gst_bin_get_by_name(GST_BIN(pipelineMic), "my_audiosrc");
        if (!audiosrc) {
            return;
        }
        gst_element_set_state(pipelineMic, GST_STATE_NULL);
        g_object_set(G_OBJECT(audiosrc), "device", newDevice.toUtf8().constData(), NULL);
        // BUGFIX: gst_bin_get_by_name() returns a new reference (was leaked).
        gst_object_unref(audiosrc);

        // Re-install the sync handler and restart the meter.
        GstBus *bus = gst_element_get_bus (pipelineMic);
        gst_bus_set_sync_handler( bus, (GstBusSyncHandler)message_handler,NULL, NULL );
        // BUGFIX: gst_element_get_bus() returns a new reference (was leaked).
        gst_object_unref(bus);
        gst_element_set_state(pipelineMic, GST_STATE_PLAYING);
        qDebug()<<"core - 检测的麦克风设备已变更: "<< newDevice;
    }
}

/// Cache the capture-target geometry chosen in the region-selection UI.
/// `qId` is accepted for interface compatibility but not stored here.
void Core::setPropertyOfSelectWid(uint qId, bool isfullscreen, bool isWindow, QString screen, uint id, QRect rect, double scale)
{
    Q_UNUSED(qId)

    m_screen       = screen;
    m_isFullscreen = isfullscreen;
    m_isWindow     = isWindow;
    m_xid          = QString::number( id );
    m_x            = rect.x();
    m_y            = rect.y();
    m_width        = rect.width();
    m_height       = rect.height();
    m_scale        = scale;
}

/// Set the directory that recordings are written into.
void Core::setSavePath(QString savePath)
{
    m_sVideoPath = savePath;
}

/// Map the textual format choice from the UI onto the format enum stored
/// in m_format. Unknown strings leave m_format unchanged.
void Core::setSaveFormat(QString saveformat)
{
    if      (saveformat == "mp4")  { m_format = MP4;  }
    else if (saveformat == "mkv")  { m_format = MKV;  }
    else if (saveformat == "avi")  { m_format = AVI;  }
    else if (saveformat == "mov")  { m_format = MOV;  }
    else if (saveformat == "webm") { m_format = WEBM; }
    else if (saveformat == "gif")  { m_format = GIF;  }
    qDebug()<<"Core::setSaveFormat: 已设置 m_format="<<m_format;
}

/// Apply a quality preset: encoder bitrate (in kbps, as the debug line
/// below confirms) plus the quantizer range. The SUPER preset also
/// forces the high-4:4:4 H.264 profile.
void Core::setResolution(int resolution)
{
    if ( resolution == static_cast<int>(SaveResolution::SUPER_OR_ORIGINAL_QUALITY) ) {
        m_bitrate = 8000; // kbps
        m_qpmin   = 2;
        m_qpmax   = 2;
        m_profile = "high-4:4:4";
    } else if ( resolution == static_cast<int>(SaveResolution::HIGHT_QUALITY) ) {
        m_bitrate = 5000;
        m_qpmin   = 15;
        m_qpmax   = 15;
    } else if ( resolution == static_cast<int>(SaveResolution::STANDARD_QUALITY) ) {
        m_bitrate = 2500;
        m_qpmin   = 35;
        m_qpmax   = 35;
    }
    qDebug()<<"core - 已设置 码率  m_bitrate"<<m_bitrate << "kbps";
}

/// Parse the framerate choice from the UI. Each candidate is tested with
/// contains() in ascending order, so the last match wins — mirroring the
/// original chain of if-statements exactly.
void Core::setFrameRate(QString frameRate)
{
    const int candidates[] = { 15, 25, 30, 50, 60 };
    for ( int fps : candidates ) {
        if ( frameRate.contains( QString::number( fps ) ) ) {
            m_fps = fps;
        }
    }
    qDebug()<<"Core::setFrameRate: 已设置 m_fps="<<m_fps;
}

/// Remember the microphone chosen in the UI and repoint the level-meter
/// pipeline at the matching PulseAudio device.
void Core::setMicrophone(QString microphone)
{
    // The "no microphone" sentinel counts as an empty selection.
    if ( microphone.isEmpty() || microphone == m_NoMic ) {
        m_microphoneName = "";
    } else {
        m_microphoneName = microphone;
    }

    // Resolve the selected name to its device id (empty when unknown).
    QString newDevice = "";
    if ( mapNameToDevice.contains( m_microphoneName ) ) {
        newDevice = mapNameToDevice.value( m_microphoneName );
    } else {
        qDebug()<<"Core::setMicrophone 选择设备不存在";
    }

    // Rebuild the meter pipeline/callback against the new device.
    ModifylevelMeterDevice( newDevice );
}

/// Private constructor (Core is a singleton — see getInstance()).
/// Logs GStreamer element availability and defaults the save path to the
/// user's Movies directory.
Core::Core(QObject *parent) : QObject(parent)
{
    gst_Elements_available();
    m_sVideoPath = QStandardPaths::writableLocation( QStandardPaths::MoviesLocation );
    qDebug() << "m_sVideoPath:" << m_sVideoPath;
}

/// Release the lazily-created PulseAudio connection, if one exists
/// (created on first toggle_microphone() call).
Core::~Core()
{
    if ( m_mainloop != nullptr ) {
        pa_context_disconnect( m_context );
        pa_context_unref( m_context );
        pa_mainloop_free( m_mainloop );
        m_context  = nullptr;
        m_mainloop = nullptr;
    }
}


