#include <getopt.h>

#include <chrono>
#include <codecvt>
#include <cstdio>
#include <cstring>
#include <exception>
#include <fstream>
#include <iostream>
#include <map>
#include <numeric>
#include <random>
#include <string>
#include <vector>

#include <acl/acl.h>
#include <sndfile.h>

#include <espeak-ng/speak_lib.h>
#include <text_to_phone.h>

// Exception thrown whenever an ACL runtime call reports a failure
// during the TTS pipeline.
class TTSExecuteException : public std::exception {
public:
    // Fixed diagnostic message; no per-instance state is carried.
    const char *what() const noexcept override { return "tts execute error"; }
};

// Translate a non-successful ACL status code into a TTSExecuteException.
// No-op when the call succeeded.
void checkAclError(aclError error) {
    if (error == ACL_SUCCESS) {
        return;
    }
    throw TTSExecuteException();
}


/**
 * Length regulation: repeat each hidden-state frame according to its
 * predicted duration.
 *
 * For every input token i the frame of `hidden_state_length` floats is copied
 * `(int) duration[i]` times (fractional part discarded) into the destination
 * buffer, producing the expanded sequence consumed by the decoder. Both
 * buffers live in device memory; copies are device-to-device.
 *
 * @param expand_hidden_state destination device buffer, at least
 *                            duration_sum * hidden_state_length floats.
 * @param hidden_state        source device buffer holding numInput0 frames.
 * @param duration_sum        total number of frames to emit (sum of truncated
 *                            durations); tracks the remaining capacity.
 * @param numInput0           number of input tokens/frames.
 * @param duration            per-token durations, readable from the host
 *                            (valid because the app runs in ACL_DEVICE mode).
 * @param hidden_state_length floats per frame.
 * @return ACL_SUCCESS, or the status of the first failing copy.
 */
aclError expandHiddenState(void *expand_hidden_state, void *hidden_state, size_t duration_sum, size_t numInput0,
                           const float *duration, size_t hidden_state_length) {
    const size_t frameBytes = hidden_state_length * sizeof(float);

    for (size_t i = 0; i < numInput0; i++) {
        for (int j = 0; j < (int) duration[i]; j++) {
            // Destination capacity shrinks by one frame per emitted copy.
            aclError cpyErr = aclrtMemcpy(expand_hidden_state, frameBytes * duration_sum,
                                          hidden_state, frameBytes, ACL_MEMCPY_DEVICE_TO_DEVICE);
            if (cpyErr != ACL_SUCCESS) {
                std::cout << "memory copy error in expanding hidden state." << std::endl;
                return cpyErr;
            }
            expand_hidden_state = static_cast<float *>(expand_hidden_state) + hidden_state_length;
            duration_sum--;
        }
        // Advance the source pointer one frame per input token.
        hidden_state = static_cast<float *>(hidden_state) + hidden_state_length;
    }

    return ACL_SUCCESS;
}


class VITS {

public:

    VITS(const char *encoderModelPath, const char *decoderModelPath, const char *espeakSharePath,
         const char *language) {


        espeak_Initialize(AUDIO_OUTPUT_RETRIEVAL, 0, espeakSharePath, 0x00000003);

        espeak_SetVoiceByName(language);

        load_phoneIdMap_v2("../ipa-en-us_phone-id-map.txt", phoneIdMap);


        checkAclError(aclInit(nullptr));
        aclrtRunMode runMode;
        checkAclError(aclrtCreateContext(&context, deviceId));
        checkAclError(aclrtCreateStream(&stream));
        aclrtGetRunMode(&runMode);
        if (runMode != ACL_DEVICE) {
            std::cout << "tts must be executed in ascend device" << std::endl;
            throw 0;
        }

        checkAclError(loadModel(encoderModelPath, encoderModelMemPointer, encoderModelWeightPointer, encoderModelId));
        checkAclError(loadModel(decoderModelPath, decoderModelMemPointer, decoderModelWeightPointer, decoderModelId));

        // encoder model prepare input/output data.
        encoderModelDesc = aclmdlCreateDesc();
        aclmdlGetDesc(encoderModelDesc, encoderModelId);
        encoderModelInputDataset = aclmdlCreateDataset();
        encoderModelOutputDataset = aclmdlCreateDataset();

        // decoder model prepare input/output data.
        decoderModelDesc = aclmdlCreateDesc();
        aclmdlGetDesc(decoderModelDesc, decoderModelId);
        decoderModelInputDataset = aclmdlCreateDataset();
        decoderModelOutputDataset = aclmdlCreateDataset();


        for (size_t i = 0; i < aclmdlGetNumInputs(encoderModelDesc); i++)
            prepareInputBuffer(encoderModelDesc, encoderModelInputDataset, i);
        for (size_t i = 0; i < aclmdlGetNumOutputs(encoderModelDesc); i++)
            prepareOutputBuffer(encoderModelDesc, encoderModelOutputDataset, i);
        for (size_t i = 0; i < aclmdlGetNumInputs(decoderModelDesc); i++)
            prepareInputBuffer(decoderModelDesc, decoderModelInputDataset, i);
        for (size_t i = 0; i < aclmdlGetNumOutputs(decoderModelDesc); i++)
            prepareOutputBuffer(decoderModelDesc, decoderModelOutputDataset, i);

    }

    ~VITS() {
        aclmdlUnload(encoderModelId);
        aclmdlUnload(decoderModelId);
        aclmdlDestroyDesc(encoderModelDesc);
        aclmdlDestroyDesc(decoderModelDesc);
        aclmdlDestroyDataset(encoderModelInputDataset);
        aclmdlDestroyDataset(encoderModelOutputDataset);
        aclmdlDestroyDataset(decoderModelInputDataset);
        aclmdlDestroyDataset(decoderModelOutputDataset);

        aclrtDestroyStream(stream);
        aclrtDestroyContext(context);
        aclFinalize();
    }

private:
    int deviceId = 0;
    aclrtContext context;
    aclrtStream stream;
    uint32_t encoderModelId;
    void *encoderModelMemPointer;
    void *encoderModelWeightPointer;
    void *decoderModelMemPointer;
    void *decoderModelWeightPointer;
    uint32_t decoderModelId;

    aclmdlDesc *encoderModelDesc;

    aclmdlDataset *encoderModelInputDataset;
    aclmdlDataset *encoderModelOutputDataset;

    aclmdlDesc *decoderModelDesc;

    aclmdlDataset *decoderModelInputDataset;
    aclmdlDataset *decoderModelOutputDataset;
    std::map<wchar_t, int> phoneIdMap;

    std::random_device rd{};
    std::mt19937 gen{rd()};
    std::normal_distribution<> d{0, 1};


    static aclError
    loadModel(const char *modelPath, void *&modelMemPointer, void *&modelWeightPointer, uint32_t &modelId) {
        size_t modelMemorySize, modelWeightSize;
        aclError ret1 = aclmdlQuerySize(modelPath, &modelMemorySize, &modelWeightSize);

        aclError ret2 = aclrtMalloc(&modelMemPointer, modelMemorySize, ACL_MEM_MALLOC_NORMAL_ONLY);
        aclError ret3 = aclrtMalloc(&modelWeightPointer, modelWeightSize, ACL_MEM_MALLOC_NORMAL_ONLY);
        aclmdlLoadFromFileWithMem(modelPath, &modelId, modelMemPointer, modelMemorySize, modelWeightPointer,
                                  modelWeightSize);

        if (ret1 == 0 && ret2 == 0 && ret3 == 0) {
            std::cout << "Load Model Success" << std::endl;
            return ACL_SUCCESS;
        } else {
            std::cout << "Load Model Fail" << std::endl;
            return ACL_ERROR_FAILURE;
        }
    }

    static aclError getIODim(aclmdlDesc *modelDesc, size_t numInput, size_t dynamic_dim, aclmdlIODims &ioDim) {
        size_t gearCount;
        aclmdlGetInputDynamicGearCount(modelDesc, -1, &gearCount);
        aclmdlIODims dims[100];
        aclmdlGetInputDynamicDims(modelDesc, -1, dims, gearCount);
        size_t target_dim_index = gearCount;
        for (size_t i = 0; i < gearCount; i++) {
            if (dims[i].dims[dynamic_dim] >= (long) numInput) {
                target_dim_index = i;
                break;
            }
        }
        if (target_dim_index == gearCount) {
            printf("unsupported dynamic dims: input length %zu, exceed max length limit %ld\n", numInput,
                   dims[gearCount - 1].dims[dynamic_dim]);
            return ACL_ERROR_FAILURE;
        }
        ioDim = dims[target_dim_index];
        return ACL_SUCCESS;
    }

    void prepareInputBuffer(aclmdlDesc *modelDesc, aclmdlDataset *modelDataset, size_t inputIndex) {
        size_t memorySize = aclmdlGetInputSizeByIndex(modelDesc, inputIndex);
        void *p;
        checkAclError(aclrtMalloc(&p, memorySize, ACL_MEM_MALLOC_NORMAL_ONLY));
        aclDataBuffer *buffer = aclCreateDataBuffer(p, memorySize);
        aclmdlAddDatasetBuffer(modelDataset, buffer);
    }

    void prepareOutputBuffer(aclmdlDesc *modelDesc, aclmdlDataset *modelDataset, size_t inputIndex) {
        size_t memorySize = aclmdlGetOutputSizeByIndex(modelDesc, inputIndex);
        void *p;
        checkAclError(aclrtMalloc(&p, memorySize, ACL_MEM_MALLOC_NORMAL_ONLY));
        aclDataBuffer *buffer = aclCreateDataBuffer(p, memorySize);
        aclmdlAddDatasetBuffer(modelDataset, buffer);
    }


    void prepareInput(aclmdlDesc *modelDesc, aclmdlDataset *modelInputDataset, size_t inputIndex, void *input_data,
                      size_t numInput, aclrtMemcpyKind kind = ACL_MEMCPY_DEVICE_TO_DEVICE) {
        size_t memorySize = aclmdlGetInputSizeByIndex(modelDesc, inputIndex);

        aclDataBuffer *buffer = aclmdlGetDatasetBuffer(modelInputDataset, inputIndex);
        void *p = aclGetDataBufferAddr(buffer);


        if (input_data != nullptr) {
            checkAclError(aclrtMemcpy(p, memorySize, input_data, numInput * 4,
                                      kind));
        }

        if (memorySize - numInput * 4 != 0) {
            aclrtMemset((void *) ((float *) p + numInput), memorySize - numInput * 4, 0,
                        memorySize - numInput * 4);
        }

    }

    void prepareOutput(aclmdlDesc *modelDesc, aclmdlDataset *modelOutputDataset, size_t outputIndex) {
//        size_t memorySize = aclmdlGetOutputSizeByIndex(modelDesc, outputIndex);
//        aclDataBuffer* buffer = aclmdlGetDatasetBuffer(modelOutputDataset,outputIndex);
//        void *p = aclGetDataBufferAddr(buffer);
//
    }

    void executeModel(uint32_t modelId, aclmdlDataset *modelInputDataset, aclmdlDataset *modelOutputDataset) {
        const auto s = std::chrono::system_clock::now();
        checkAclError(aclmdlExecute(modelId, modelInputDataset, modelOutputDataset));
        const auto e = std::chrono::system_clock::now();
        std::cout << "Model" << modelId << " Inference Code:" << 0 << ",Time: "
                  << std::chrono::duration_cast<std::chrono::milliseconds>(e - s).count() << "ms"
                  << std::endl;

    }

private:
    int
    textToSpeechExecute(std::vector<int> input_id, std::vector<float> &wavData, size_t &wavFrames, long &consume_time) {
        const auto s = std::chrono::system_clock::now();
        size_t encoderNumInput = input_id.size();
        int *input_data = input_id.data();

//        aclmdlIODims encoderIoDim;
//        if (getIODim(encoderModelDesc, encoderNumInput, 1, encoderIoDim) != ACL_SUCCESS) {
//            return ACL_ERROR_FAILURE;
//        }
        std::vector<size_t> input_lengths = {encoderNumInput};

        prepareInput(encoderModelDesc, encoderModelInputDataset, 0, input_data, encoderNumInput);
        prepareInput(encoderModelDesc, encoderModelInputDataset, 1, input_lengths.data(), 1);
//        prepareInput(encoderModelDesc, encoderModelInputDataset, 2, nullptr, 0);
//        aclmdlSetInputDynamicDims(encoderModelId, encoderModelInputDataset, 2, &encoderIoDim);

        executeModel(encoderModelId, encoderModelInputDataset, encoderModelOutputDataset);


        void *m_p;
        void *logs_p;
        float *duration;
        size_t hidden_state_length;
        int duration_sum = 0;


        {

            size_t duration_len;
            aclDataBuffer *dataBuffer = aclmdlGetDatasetBuffer(encoderModelOutputDataset, 0);
            m_p = aclGetDataBufferAddr(dataBuffer);
            size_t len = aclGetDataBufferSizeV2(dataBuffer) / sizeof(float);

            aclDataBuffer *dataBuffer1 = aclmdlGetDatasetBuffer(encoderModelOutputDataset, 1);
            logs_p = aclGetDataBufferAddr(dataBuffer1);
//            size_t len1 = aclGetDataBufferSizeV2(dataBuffer1);
            aclDataBuffer *dataBuffer2 = aclmdlGetDatasetBuffer(encoderModelOutputDataset, 2);
            void *data = aclGetDataBufferAddr(dataBuffer2);
            duration_len = aclGetDataBufferSizeV2(dataBuffer2) / sizeof(float);
            duration = reinterpret_cast<float *>(data);

            for (size_t i = 0; i < encoderNumInput; i++) {
                duration_sum += (int) duration[i];
            }
            hidden_state_length = len / duration_len;
        }


        aclmdlIODims decoderIODim;


        if (getIODim(decoderModelDesc, static_cast<size_t>(duration_sum), 1, decoderIODim) != ACL_SUCCESS) {
            return ACL_ERROR_FAILURE;
        }

        const auto cpu_s = std::chrono::system_clock::now();

        {

            size_t memorySize = aclmdlGetInputSizeByIndex(decoderModelDesc, 0);
//        size_t memorySize = decoderIODim.dims[1] * 384 * 4;
            aclDataBuffer *buffer = aclmdlGetDatasetBuffer(decoderModelInputDataset, 0);
            void *devicePointer = aclGetDataBufferAddr(buffer);


            size_t memorySize1 = aclmdlGetInputSizeByIndex(decoderModelDesc, 1);
            aclDataBuffer *buffer1 = aclmdlGetDatasetBuffer(decoderModelInputDataset, 1);
            void *devicePointer1 = aclGetDataBufferAddr(buffer1);


            expandHiddenState(devicePointer, m_p, (size_t) (duration_sum), encoderNumInput, duration,
                              hidden_state_length);

            expandHiddenState(devicePointer1, logs_p, (size_t) (duration_sum), encoderNumInput, duration,
                              hidden_state_length);

//            aclrtMemset((void *) ((float *) devicePointer + static_cast<size_t>(duration_sum) * hidden_state_length),
//                        memorySize - static_cast<size_t>(duration_sum) * hidden_state_length * 4,
//                        0,
//                        memorySize - static_cast<size_t>(duration_sum) * hidden_state_length * 4
//            );
//            aclrtMemset((void *) ((float *) devicePointer1 + static_cast<size_t>(duration_sum) * hidden_state_length),
//                        memorySize1 - static_cast<size_t>(duration_sum) * hidden_state_length * 4,
//                        0,
//                        memorySize1 - static_cast<size_t>(duration_sum) * hidden_state_length * 4
//            );


        }

        std::vector<float> eps;
        size_t decoderNumInput = static_cast<size_t>(duration_sum) * hidden_state_length;
        {
            for (size_t i = 0; i < decoderNumInput; i++) {
                eps.emplace_back(d(gen));
            }
        }
        const auto cpu_e = std::chrono::system_clock::now();
//        std::cout << "Projection Time: " << std::chrono::duration_cast<std::chrono::milliseconds>(cpu_e - cpu_s).count()
//                  <<
//                  "ms" << std::endl;

        std::vector<int> decoder_input_lengths = {duration_sum};
        prepareInput(decoderModelDesc, decoderModelInputDataset, 2, eps.data(), decoderNumInput);
        prepareInput(decoderModelDesc, decoderModelInputDataset, 3, decoder_input_lengths.data(), 1);
        prepareInput(decoderModelDesc, decoderModelInputDataset, 4, nullptr, 0);
        aclmdlSetInputDynamicDims(decoderModelId, decoderModelInputDataset, 4, &decoderIODim);

        executeModel(decoderModelId, decoderModelInputDataset, decoderModelOutputDataset);


        float *decOutputData;
        {
            aclDataBuffer *dataBuffer = aclmdlGetDatasetBuffer(decoderModelOutputDataset, 0);
            void *data = aclGetDataBufferAddr(dataBuffer);
//            size_t len = aclGetDataBufferSizeV2(dataBuffer) / sizeof(float);
            decOutputData = reinterpret_cast<float *>(data);
        }
        auto numSpec = static_cast<size_t>(duration_sum);


        size_t decOutputLen;


        decOutputLen = 256 * numSpec;


        for (size_t i = 0; i < decOutputLen; i++) {
            wavData.emplace_back(((float *) decOutputData)[i]);
        }
        wavFrames += decOutputLen;

        const auto e = std::chrono::system_clock::now();
        consume_time += std::chrono::duration_cast<std::chrono::milliseconds>(e - s).count();
        return ACL_SUCCESS;
    }


public:

    int text_to_speech(const char *input_text, std::vector<float> &wavData, size_t &wavFrames, long &consume_time) {
        std::vector<int> input_phones;
        text_to_phone(input_text, input_phones, phoneIdMap);
        textToSpeechExecute(input_phones, wavData, wavFrames, consume_time);
        return ACL_SUCCESS;
    }


    aclError
    text_to_speech_file(const char *filename, const char *output_directory) {
        if (filename == nullptr) {
            return ACL_ERROR_FAILURE;
        }
        std::ifstream input_stream(filename);


        input_stream.imbue(std::locale("C.UTF-8"));
        std::vector<std::string> lines;

        char s[1000];
        while (input_stream.getline(s, 1000)) {
            lines.emplace_back(s);
        }


        for (size_t i = 0; i < lines.size(); i++) {
            std::vector<int> phones_id;
            if (phones_id.size() > 1024) {
                std::cout << "Exceed Maximum Size 1024." << std::endl;
                continue;
            }
            std::vector<float> wavData;
            size_t wavFrames = 0;
            long consume_time = 0;
            text_to_phone(lines[i].c_str(), phones_id, phoneIdMap);
            aclError tts_result = textToSpeechExecute(phones_id, wavData, wavFrames, consume_time);
            char output_filename[100];
            sprintf(output_filename, "%s/%s.wav", output_directory, std::to_string((int) i).c_str());

            if (tts_result == ACL_SUCCESS) {
                synthesis(output_filename, wavData, wavFrames, 22050);
                std::cout << "RTF: " << (float) consume_time / 1000. / (float) wavFrames * 22050. << std::endl;
            } else {
                std::cout << "tts execute failure." << std::endl;
            }
        }

        return ACL_SUCCESS;
    }


    aclError
    text_to_speech_phonemes_file(const char *filename, const char *output_directory) {
        if (filename == nullptr) {
            return ACL_ERROR_FAILURE;
        }
        std::wifstream input_stream(filename);


        input_stream.imbue(std::locale("C.UTF-8"));
        std::vector<std::wstring> lines;

        wchar_t s[1000];
        while (input_stream.getline(s, 1000)) {
            lines.emplace_back(s);
        }


        for (size_t i = 0; i < lines.size(); i++) {
            size_t spe0 = 0, spe1 = 0;
            for (size_t j = 0; j < lines[i].size(); j++) {
                switch (lines[i][j]) {
                    case L'/':
                        spe0 = j;
                    case L'|':
                        spe1 = j;
                }
            }
            std::wstring wav_filename = lines[i].substr(spe0 + 1, spe1 - spe0 - 1);

//            std::wstring phones = lines[i].substr(spe1 + 1, lines[i].size() - spe1 - 1);

//            if (lines[i].size() - spe1 - 1 > 69) continue;

            std::vector<int> phones_id;
            phones_id.emplace_back(0);
            for (size_t z = spe1 + 1; z < lines[i].size(); z++) {
                if (phoneIdMap.find(lines[i][z]) != phoneIdMap.end()) {
                    phones_id.emplace_back(phoneIdMap.at(lines[i][z]));
                    phones_id.emplace_back(0);

                }
            }

            std::vector<float> wavData;
            size_t wavFrames = 0;
            long consume_time = 0;
            char *output_filename = new char[30];
            sprintf(output_filename, "%s/%s", output_directory,
                    std::string(wav_filename.begin(), wav_filename.end()).c_str());

            aclError tts_result = textToSpeechExecute(phones_id, wavData, wavFrames, consume_time);
            if (tts_result == ACL_SUCCESS) {
                synthesis(output_filename, wavData, wavFrames, 22050);
            }
        }

    }



    static aclError synthesis(const char *filename, std::vector<float> &wavData, size_t wavFrames, int sampleRate) {

        int sample_rate = sampleRate;
        float wav_duration = (float) (wavFrames) / (float) (sample_rate);
//        std::cout << "sample rate: " << sample_rate << ",frames: " << wavFrames << ",wav duration: " << wav_duration
//                  << std::endl;


        SF_INFO sfInfo{
                (sf_count_t) (wavFrames),
                sample_rate,
                1,
                SF_FORMAT_WAV | SF_FORMAT_FLOAT | SF_ENDIAN_FILE,
                1,
                true

        };
        SNDFILE *sndfile = sf_open(filename, SFM_WRITE, &sfInfo);
        sf_writef_float(sndfile, wavData.data(), (sf_count_t) (wavFrames));
        sf_close(sndfile);
        return ACL_SUCCESS;
    }
};

int main(int argc, char *argv[]) {

    int opt;
    int option_index = 0;
    bool file_flag = false;
    bool text_flag = false;
    static struct option long_options[] = {
            {"help",         no_argument,       nullptr, 'h'},
            {"file",         required_argument, nullptr, 'f'},
            {"text",         required_argument, nullptr, 't'},
            {"model",        required_argument, nullptr, 'm'},
            {"language",     required_argument, nullptr, 'l'},
            {"espeak-share", required_argument, nullptr, 'e'},
            {"output",       required_argument, nullptr, 'o'},
            {"mode", required_argument, nullptr, 'd'}


    };
    std::string filename;
    std::string text;
    std::string model_dir_path = "./model";
    std::string language = "en-us";
    std::string espeak_share_path = "/home/HwHiAiUser/Libraries/libespeak-ng/share";
    std::string output_path = "./output";
    std::string filemode = "text";
    while ((opt = getopt_long(argc, argv, "hftmleo", long_options, &option_index)) != -1) {
        switch (opt) {
            case 'h':
                std::cout
                        << "Usage: VITS [options]..." << std::endl
                        << "    --file filename, can't use text option when using file option." << std::endl
                        << "    --text text, can't use file option when using file option." << std::endl
                        << "    --model model_dir, specify model directory path, default is './model'." << std::endl
                        << "    --language language, specify which language should be used, default is 'en-us'."
                        << std::endl
                        << "    --espeak-share espeak-share path, specify espeak share data path, default is '/home/HwHiAiUser/Libraries/libespeak-ng/share'."
                        << std::endl
                        << "    --output output path, specify output wav directory path, which must be created first."
                        << std::endl;
                        << "    --filemode input file mode, 'text' or 'phoneme', specify the input file content format, must be used with --file option."
                break;
            case 'f':
                filename = optarg;
                file_flag = true;
                break;
            case 't':
                text = optarg;
                text_flag = true;
                break;
            case 'm':
                model_dir_path = optarg;
                break;
            case 'l':
                language = optarg;
                break;
            case 'e':
                espeak_share_path = optarg;
                break;
            case 'o':
                output_path = optarg;
                break;
            case 'd':
                filemode = optarg;
                break;
            default:
                std::cerr << "Usage: VITS [options]..." << std::endl;
        }
    }
    if (file_flag && text_flag) {
        std::cout << "Don't support both enable --text and --file option." << std::endl;
        return 0;
    }
    if (!(file_flag || text_flag)) {
        std::cout << "Please specify --text or --file option value." << std::endl;
        return 0;
    }
    char VITS_encoder_path[200];
    char VITS_decoder_path[200];
    sprintf(VITS_encoder_path, "%s/vits_encoder.om", model_dir_path.c_str());
    sprintf(VITS_decoder_path, "%s/vits_decoder.om", model_dir_path.c_str());

    // initialize vits.
    VITS vits(VITS_encoder_path, VITS_decoder_path, espeak_share_path.c_str(), language.c_str());

    if (file_flag) {
        if (filemode == 'text')
            vits.text_to_speech_file(filename.c_str(), output_path.c_str());
        else if (filemode == 'phoneme')
            vits.text_to_speech_phonemes_file(filename.c_str(), output_path.c_str());
    }

    if (text_flag) {

    }


//    std::vector<float> wavData;
//    size_t wavFrames = 0;
//    long t = 0;
//    vits.textToSpeech(argv[2], wavData, wavFrames, t);
    char *output_filename = new char[100];
//    sprintf(output_filename, "./test%d.wav", static_cast<int>(i));
//        std::cout << output_filename << std::endl;
//    VITS::synthesis(output_filename, wavData, wavFrames, 22050);
//    std::cout << "Consume Time: " << t << "ms" << std::endl;

}


