#include "chatglm.h"
#include <fstream>
#include <iomanip>
#include <iostream>

#ifdef _WIN32
#include <codecvt>
#include <fcntl.h>
#include <io.h>
#include <windows.h>
#endif

#include <vector>

// Inference mode selectable via --mode: multi-turn chat vs. plain text generation.
enum InferenceMode
{
    INFERENCE_MODE_CHAT,     // chat mode (the Args default)
    INFERENCE_MODE_GENERATE, // raw generation mode
};

// Map a CLI mode name ("chat" / "generate") to its InferenceMode value.
// Throws std::out_of_range for any other name (std::unordered_map::at).
static inline InferenceMode to_inference_mode(const std::string &s)
{
    // const: the table is read-only shared state and must not be mutated.
    static const std::unordered_map<std::string, InferenceMode> m{{"chat", INFERENCE_MODE_CHAT},
                                                                  {"generate", INFERENCE_MODE_GENERATE}};
    return m.at(s);
}

// Generation / runtime settings mirroring the chatglm.cpp command-line flags.
struct Args
{
    std::string model_path = "models/chatglm-ggml.bin"; // -m: path to the GGML model file
    InferenceMode mode = INFERENCE_MODE_CHAT;           // --mode: inference mode (chat or generate)
    bool sync = false;                                  // --sync: synchronized generation without streaming
    std::string prompt = "你好";                        // -p: prompt to start generation with
    std::string system = "";                            // -s: system message to set the behavior of the assistant
    int max_length = 2048;                              // -l: max total length including prompt and output
    int max_new_tokens = -1;                            // --max_new_tokens: max tokens to generate, ignoring prompt tokens
    int max_context_length = 512;                       // -c: max context length
    bool interactive = false;                           // -i: run in interactive mode
    int top_k = 0;                                      // --top_k: top-k sampling
    float top_p = 0.7;                                  // --top_p: top-p (nucleus) sampling
    float temp = 0.95;                                  // --temp: sampling temperature
    float repeat_penalty = 1.0;                         // --repeat_penalty: penalize repeated token sequences (1.0 = disabled)
    int num_threads = 0;                                // -t: number of threads for inference
    bool verbose = false;                               // -v: display verbose config/system/performance info
};

// Echo an assistant reply to stdout; if the reply carries a code-interpreter
// tool call, echo the code input on its own line as well.
static inline void print_message(const chatglm::ChatMessage &message)
{
    std::cout << message.content << "\n";

    const auto &calls = message.tool_calls;
    if (!calls.empty())
    {
        const auto &first_call = calls.front();
        if (first_call.type == chatglm::ToolCallMessage::TYPE_CODE)
        {
            std::cout << first_call.code.input << "\n";
        }
    }
}

// // Model path (local default)
// #define MODEL_PATH_BIN ("./models/ggml.bin")

// Preset model location
#define MODEL_PATH_BIN ("E:/models/ggml/Baichuan2-13B-Chat_f16.bin")

/***********************************************************************************************************************************/

// System prompt for the preprocessing (Markdown clean-up) pass
#define CHAT_SYSTEM_SET_PREPROCESSING \
    ("As an intelligent AI assistant, you will provide concise and accurate answers as required.")

// System prompt for the translation pass (the Chinese text is a runtime string — do not alter)
#define CHAT_SYSTEM_SET \
    ("你是翻译专家，会将任何语言的文字翻译为简体中文")

// Instruction prefix prepended to each paragraph in the preprocessing pass
#define CHAT_PREFIX_PREPROCESSING ("Complete the following requirements: 1, Organize text using Markdown format; 2, do not add new content. Content to be organized:\n")

// Instruction prefix prepended to each paragraph in the translation pass
#define CHAT_PREFIX ("完成以下要求: 1,将英文翻译为简体中文; 2,使用Markdown格式回答; 3,代码需要整理代码格式与代码缩进（代码不翻译,代码中的注释需要翻译）; 内容：\n")

// Active system prompt
std::string g_systemInfo = CHAT_SYSTEM_SET;

// Conversation history sent to the model on every request (reset per paragraph)
std::vector<chatglm::ChatMessage> g_history_messages;

// Current speaker role (set to ROLE_USER before each pass)
std::string m_role;

// Split an input text file into paragraph-sized chunks for translation.
//
// A chunk is flushed when it ends in ".\n" once it has grown to at least
// kPreferredSplitSize bytes, or at any "\n" once it has passed the hard
// kForcedSplitSize limit.  Any trailing remainder is padded with filler text
// so a near-empty tail still gives the model something to work with.
//
// cut_file_name  path to the input text file
// returns        the chunks, or an empty vector if the file cannot be opened
std::vector<std::string> datasheet_cut(std::string cut_file_name)
{
    // Note: the original comment claimed 1024/1536-byte thresholds, but the
    // code has always used these values; they are now named constants.
    constexpr std::size_t kPreferredSplitSize = 2048; // split at ".\n" once this big
    constexpr std::size_t kForcedSplitSize = 3027;    // split at any "\n" once this big

    std::vector<std::string> cut_paragraphs; // source paragraphs awaiting translation
    std::string paragraph;

    std::ifstream input_file(cut_file_name);
    if (!input_file.is_open())
    {
        std::cerr << "Unable to open input file." << std::endl;
        return cut_paragraphs;
    }

    char c;
    while (input_file.get(c))
    {
        paragraph.push_back(c);

        // paragraph.size() tracks the bytes read since the last split, so a
        // separate byte counter is unnecessary (they were always identical).
        const bool sentence_end =
            paragraph.size() >= 2 && paragraph[paragraph.size() - 1] == '\n' && paragraph[paragraph.size() - 2] == '.';
        const bool line_end = paragraph.back() == '\n';

        // Parenthesized explicitly: the original mixed || and && without
        // parentheses, relying on precedence.
        if ((sentence_end && paragraph.size() >= kPreferredSplitSize) ||
            (line_end && paragraph.size() >= kForcedSplitSize))
        {
            cut_paragraphs.push_back(paragraph);
            paragraph.clear();
        }
    }

    // At end of file, keep whatever is left, padded with filler so an almost
    // empty tail does not trip up the downstream translation step.
    if (!paragraph.empty())
    {
        cut_paragraphs.push_back(paragraph + "\nLife is actually like the weather, with clear skies and overcast days, and occasionally a little rain. It’s the natural order of things. Life isn’t simple, but we should try to live it as simply as possible.\n");
    }

    input_file.close();

    std::cout << "input cut success" << std::endl;

    return cut_paragraphs;
}

// Reset g_history_messages for the preprocessing (Markdown clean-up) pass:
// the preprocessing system prompt plus one few-shot user/assistant example
// showing the expected reformatting.  Discards any previous history.
void setSystemInfo_preprocessing(void)
{

    std::vector<chatglm::ChatMessage> system_messages;

    std::string systemInfo = CHAT_SYSTEM_SET_PREPROCESSING;

    // System prompt goes at the head of the history
    system_messages.emplace_back(chatglm::ChatMessage::ROLE_SYSTEM, systemInfo);

    // Few-shot example: a known-good input/output pair is placed in the
    // history so the model imitates the answer style.
    std::string input_0 = (std::string)CHAT_PREFIX_PREPROCESSING +
                          "Visible symbols can be configured by setting them in configuration files. The initial configuration\n"
                          "is produced by merging a *_defconfig file for the board with application settings, usually from\n"
                          "prj.conf. See The Initial Configuration below for more details.\n"
                          "Assignments in configuration files use this syntax:\n"
                          "CONFIG_<symbol name>=<value>\n"
                          "There should be no spaces around the equals sign.\n"
                          "bool symbols can be enabled or disabled by setting them to y or n, respectively. The FPU symbol\n"
                          "from the example above could be enabled like this:\n"
                          "CONFIG_FPU=y\n"
                          "Note: A boolean symbol can also be set to n with a comment formatted like this:\n"
                          "# CONFIG_SOME_OTHER_BOOL is not set\n"
                          "This is the format you will see in the merged configuration in zephyr/.config.\n"
                          "This style is accepted for historical reasons: Kconfig configuration files can be parsed as makefiles (though Zephyr doesn’t use this). Having n-valued symbols correspond to unset variables\n"
                          "simplifies tests in Make.\n";

    std::string output_0 =
        "Visible symbols can be configured by setting them in configuration files. The initial configuration is produced by merging a `*_defconfig` file for the board with application settings, usually from `prj.conf`. See The Initial Configuration below for more details.\n"
        "\n"
        "Assignments in configuration files use this syntax:\n"
        "```\n"
        "CONFIG_<symbol name>=<value>\n"
        "```\n"
        "\n"
        "There should be no spaces around the equals sign.\n"
        "bool symbols can be enabled or disabled by setting them to `y` or `n`, respectively. \n"
        "The `FPU` symbol from the example above could be enabled like this:\n"
        "\n"
        "```\n"
        "CONFIG_FPU=y\n"
        "```\n"
        "\n"
        "Note: A boolean symbol can also be set to n with a comment formatted like this:\n"
        "```\n"
        "# CONFIG_SOME_OTHER_BOOL is not set\n"
        "```\n"
        "\n"
        "This is the format you will see in the merged configuration in `zephyr/.config`.\n"
        "This style is accepted for historical reasons: Kconfig configuration files can be parsed as makefiles (though Zephyr doesn’t use this). Having n-valued symbols correspond to unset variables simplifies tests in Make.\n";

    system_messages.emplace_back(chatglm::ChatMessage::ROLE_USER, input_0);
    system_messages.emplace_back(chatglm::ChatMessage::ROLE_ASSISTANT, output_0);

    // Replace the global history wholesale (drops any earlier turns)
    g_history_messages = system_messages;
}

// Reset g_history_messages for the translation pass: the translation system
// prompt plus one few-shot user/assistant example (English source -> Simplified
// Chinese Markdown).  Discards any previous history.
void setSystemInfo(void)
{

    std::vector<chatglm::ChatMessage> system_messages;

    std::string systemInfo = CHAT_SYSTEM_SET;

    // System prompt goes at the head of the history
    system_messages.emplace_back(chatglm::ChatMessage::ROLE_SYSTEM, systemInfo);

    // Few-shot example: a known-good translation pair is placed in the
    // history so the model imitates the answer style.
    std::string input_1 = (std::string)CHAT_PREFIX + "Visible symbols can be configured by setting them in configuration files. The initial configuration is produced by merging a `*_defconfig` file for the board with application settings, usually from `prj.conf`. See The Initial Configuration below for more details.\n"
                                                     "\n"
                                                     "Assignments in configuration files use this syntax:\n"
                                                     "```\n"
                                                     "CONFIG_<symbol name>=<value>\n"
                                                     "```\n"
                                                     "\n"
                                                     "There should be no spaces around the equals sign.\n"
                                                     "bool symbols can be enabled or disabled by setting them to `y` or `n`, respectively. \n"
                                                     "The `FPU` symbol from the example above could be enabled like this:\n"
                                                     "\n"
                                                     "```\n"
                                                     "CONFIG_FPU=y\n"
                                                     "```\n"
                                                     "\n"
                                                     "Note: A boolean symbol can also be set to n with a comment formatted like this:\n"
                                                     "```\n"
                                                     "# CONFIG_SOME_OTHER_BOOL is not set\n"
                                                     "```\n"
                                                     "\n"
                                                     "This is the format you will see in the merged configuration in `zephyr/.config`.\n"
                                                     "This style is accepted for historical reasons: Kconfig configuration files can be parsed as makefiles (though Zephyr doesn’t use this). Having n-valued symbols correspond to unset variables simplifies tests in Make.\n";

    std::string output_1 = "可见符号可以通过在配置文件中设置它们来进行配置。初始配置是通过将板级`*_defconfig`文件与应用程序设置（通常来自`prj.conf`）合并产生的。有关更多详细信息，请参见下文的“初始配置”。\n"
                           "\n"
                           "配置文件中的赋值使用以下语法：\n"
                           "```c\n"
                           "CONFIG_<symbol name>=<value>\n"
                           "```\n"
                           "\n"
                           "等号周围不应有空格。布尔符号可以通过分别设置为`y`或`n`来启用或禁用。\n"
                           "上面示例中的`FPU`符号可以这样启用：\n"
                           "```c\n"
                           "CONFIG_FPU=y\n"
                           "```\n"
                           "\n"
                           "注意：布尔符号也可以使用如下注释格式设置为`n`：\n"
                           "```c\n"
                           "# CONFIG_SOME_OTHER_BOOL is not set\n"
                           "```\n"
                           "\n"
                           "这是您将在`zephyr/.config`中合并配置中看到的格式。接受这种风格是出于历史原因：Kconfig配置文件可以作为makefile解析（尽管Zephyr不使用这种方式）。让n值的符号对应未设置的变量简化了Make中的测试。\n";

    system_messages.emplace_back(chatglm::ChatMessage::ROLE_USER, input_1);
    system_messages.emplace_back(chatglm::ChatMessage::ROLE_ASSISTANT, output_1);

    // Replace the global history wholesale (drops any earlier turns)
    g_history_messages = system_messages;
}

// 预处理文件
std::vector<std::string> preprocessing_datasheet(std::vector<std::string> input_paragraphs, std::string output_preprocessing_file_path)
{
    std::vector<std::string> preprocessing_paragraphs;

    std::cout << "ggml_time_init..." << std::endl;

    Args args;
    args.model_path = MODEL_PATH_BIN; // 模型路径 D:/ChatGLM3_6B/models/chatglm-ggml/chatglm3-6b-32k-ggml_q4_0.bin
    args.interactive = true;          // -i, --interactive 以交互模式运行 run in interactive mode

    args.mode = INFERENCE_MODE_CHAT; // --mode 推理模式选择 {聊天， 生成}（默认： 聊天） inference mode chose from
                                     // {chat, generate} (default: chat)

    args.max_length = 4096;         //-l, --max_length 最大总长度，包括提示和输出  max total length including prompt and output
                                    //(default: 2048)
    args.max_context_length = 4096; // -c, --max_context_length 最大上下文长度 max context length (default: 512)\n"

    args.top_k = 0;   // --top_k top-k sampling (default: 0) 在生成文本时，模型会考虑在每一步选择概率最高的前 k
                      // 个候选词语，然后从中随机选择一个作为下一个词语
    args.top_p = 0.7; // --top_p top-p sampling (default: 0.7) 控制生成的多样性，允许更多的不确定性
    args.temp =
        0.95; // --temp temperature (default: 0.95)
              // 高温度值会增加词语之间的差异，使得模型更加开放和创造性；低温度值则更加强调高概率的词语，使生成更加确定和保守
    args.repeat_penalty =
        1.0; // --repeat_penalty 为了避免生成文本中过度重复的片段，可以通过引入一个重复惩罚 来减少模型生成相似内容的概率
             // penalize repeat sequence of tokens (default: 1.0, 1.0 = disabled)

    args.num_threads = 0; // -t, --threads 用于推理的线程数 number of threads for inference

    args.verbose = false; // -v, --verbose 显示详细输出，包括配置/系统/性能信息 display verbose output including
                          // config/system/performance info

    args.prompt = "hi"; // p, --prompt PROMPT  开始生成的提示   prompt to start generation with (default: 你好)

    args.sync = false; // --sync 无流的同步生成 synchronized generation without streaming

    // -s, --system SYSTEM 系统消息，用于设置助手的行为 system message to set the behavior of the assistant
    args.system = "As an intelligent AI assistant, you will provide concise and accurate answers as required.";

    args.max_new_tokens = -1; // ---max_new_tokens 要生成的最大令牌数，忽略提示令牌数 max number of tokens to generate,
                              // ignoring the number of prompt tokens

    ggml_time_init();
    int64_t start_load_us = ggml_time_us();
    chatglm::Pipeline pipeline(args.model_path);

    int64_t end_load_us = ggml_time_us();

    std::cout << "ggml_time_init OK!" << std::endl;

    std::string model_name = pipeline.model->config.model_type_name();

    auto text_streamer = std::make_shared<chatglm::TextStreamer>(std::cout, pipeline.tokenizer.get());
    auto perf_streamer = std::make_shared<chatglm::PerfStreamer>();
    std::vector<std::shared_ptr<chatglm::BaseStreamer>> streamers{perf_streamer};

    if (!args.sync)
    {
        streamers.emplace_back(text_streamer);
    }

    auto streamer = std::make_unique<chatglm::StreamerGroup>(std::move(streamers));

    chatglm::GenerationConfig gen_config(args.max_length, args.max_new_tokens, args.max_context_length, args.temp > 0,
                                         args.top_k, args.top_p, args.temp, args.repeat_penalty, args.num_threads);

    if (args.verbose)
    {
        std::cout << "system info: | "
                  << "AVX = " << ggml_cpu_has_avx() << " | "
                  << "AVX2 = " << ggml_cpu_has_avx2() << " | "
                  << "AVX512 = " << ggml_cpu_has_avx512() << " | "
                  << "AVX512_VBMI = " << ggml_cpu_has_avx512_vbmi() << " | "
                  << "AVX512_VNNI = " << ggml_cpu_has_avx512_vnni() << " | "
                  << "FMA = " << ggml_cpu_has_fma() << " | "
                  << "NEON = " << ggml_cpu_has_neon() << " | "
                  << "ARM_FMA = " << ggml_cpu_has_arm_fma() << " | "
                  << "F16C = " << ggml_cpu_has_f16c() << " | "
                  << "FP16_VA = " << ggml_cpu_has_fp16_va() << " | "
                  << "WASM_SIMD = " << ggml_cpu_has_wasm_simd() << " | "
                  << "BLAS = " << ggml_cpu_has_blas() << " | "
                  << "SSE3 = " << ggml_cpu_has_sse3() << " | "
                  << "VSX = " << ggml_cpu_has_vsx() << " |\n";

        std::cout << "inference config: | "
                  << "max_length = " << args.max_length << " | "
                  << "max_context_length = " << args.max_context_length << " | "
                  << "top_k = " << args.top_k << " | "
                  << "top_p = " << args.top_p << " | "
                  << "temperature = " << args.temp << " | "
                  << "repetition_penalty = " << args.repeat_penalty << " | "
                  << "num_threads = " << args.num_threads << " |\n";

        std::cout << "loaded " << pipeline.model->config.model_type_name() << " model from " << args.model_path
                  << " within: " << (end_load_us - start_load_us) / 1000.f << " ms\n";

        std::cout << std::endl;
    }

    if (args.mode != INFERENCE_MODE_CHAT && args.interactive)
    {
        std::cerr << "interactive demo is only supported for chat mode, falling back to non-interactive one\n";
        args.interactive = false;
    }

    std::vector<chatglm::ChatMessage> system_messages;

    // 设置预设信息和优质回答
    setSystemInfo();

    // 打印系统设定
    if (!args.system.empty())
    {
        std::cout << std::setw(model_name.size()) << std::left << "System"
                  << " > " << args.system << std::endl;
    }

    m_role = chatglm::ChatMessage::ROLE_USER;

    // // 缓存上一次成功回答用于联系上下文
    // std::string input_last = "";
    // std::string output_last = "";

    // // 开始前清理回答缓存
    // input_last.clear();
    // output_last.clear();

    // 先整理待翻译内容 预处理
    for (auto &paragraph : input_paragraphs)
    {

    TryAgain: // 预处理失败再次尝试

        // 设置预设信息和优质回答
        setSystemInfo_preprocessing();

        // // 将上一次成功回答放入历史回答用于联系上下文
        // if (!input_last.empty() && !output_last.empty())
        // {
        //     g_history_messages.emplace_back(chatglm::ChatMessage::ROLE_USER, input_last);
        //     g_history_messages.emplace_back(chatglm::ChatMessage::ROLE_ASSISTANT, output_last);
        // }

        std::string input_str = (std::string)CHAT_PREFIX_PREPROCESSING + paragraph; // 取出待翻译的文本

        std::cout << "*******************************************************************";
        std::cout << "\n"
                  << "human"
                  << " > " << paragraph;

        // AI输出的文本
        std::string output_str;

        // 将未打标文本放入历史消息
        g_history_messages.emplace_back(chatglm::ChatMessage::ROLE_USER, input_str);

        std::cout << "\n"
                  << model_name << " > ";

        // ai返回
        chatglm::ChatMessage output = pipeline.chat(g_history_messages, gen_config, streamer.get());

        if (args.sync)
        {
            print_message(output);
        }

        /********************************************************************/
        std::string output_role = output.role;
        std::string contrast_role = chatglm::ChatMessage::ROLE_ASSISTANT; // 助理 回复类型

        // 判断是否是正常聊天类型
        if (output_role == contrast_role)
        {

            // AI输出的文本
            output_str = output.content;

            // 翻译后的内容比原内容长过多则为失败
            if (output_str.size() > paragraph.size() * 1.3)
            {
                std::cerr << "ERROR output_str long size" << std::endl;
                goto TryAgain; // 预处理失败再次尝试
            }

            // // 缓存上一次内容用于联系上下文
            // input_last = input_str;
            // output_last = output_str;

            /***********************************************************************************************************************************/
            // 将AI输出的文本内容保存到文件中
            std::ofstream output_preprocessing(output_preprocessing_file_path, std::ios::app);
            if (output_preprocessing.is_open())
            {
                output_preprocessing << output_str; // 已翻译
                // 强制将缓冲区内的数据写入硬盘
                output_preprocessing.flush();
                output_preprocessing.close();
            }
            else
            {
                std::cerr << "ERROR Unable to open output_preprocessing_file_path file." << std::endl;
                return preprocessing_paragraphs;
            }
            /***********************************************************************************************************************************/

            preprocessing_paragraphs.push_back(output_str); // 将预处理好的放入输出缓冲区
        }

        // 显示详细输出
        if (args.verbose)
        {
            std::cout << "\n"
                      << perf_streamer->to_string() << "\n\n";
        }

        perf_streamer->reset(); // 重置性能统计
    }

    std::cout << "preprocessing ok\n";

    return preprocessing_paragraphs;
}

std::vector<std::string> fuck_datasheet(std::vector<std::string> preprocessing_paragraphs, std::string output_comparison_file_path, std::string output_temporary_file_path)
{
    std::vector<std::string> output_paragraphs; // 翻译好的段落

    std::cout << "ggml_time_init..." << std::endl;

    Args args;
    args.model_path = MODEL_PATH_BIN; // 模型路径 D:/ChatGLM3_6B/models/chatglm-ggml/chatglm3-6b-32k-ggml_q4_0.bin
    args.interactive = true;          // -i, --interactive 以交互模式运行 run in interactive mode

    args.mode = INFERENCE_MODE_CHAT; // --mode 推理模式选择 {聊天， 生成}（默认： 聊天） inference mode chose from
                                     // {chat, generate} (default: chat)

    args.max_length = 8192;         //-l, --max_length 最大总长度，包括提示和输出  max total length including prompt and output
                                    //(default: 2048)
    args.max_context_length = 8192; // -c, --max_context_length 最大上下文长度 max context length (default: 512)\n"

    args.top_k = 0;   // --top_k top-k sampling (default: 0) 在生成文本时，模型会考虑在每一步选择概率最高的前 k
                      // 个候选词语，然后从中随机选择一个作为下一个词语
    args.top_p = 0.7; // --top_p top-p sampling (default: 0.7) 控制生成的多样性，允许更多的不确定性
    args.temp =
        0.95; // --temp temperature (default: 0.95)
              // 高温度值会增加词语之间的差异，使得模型更加开放和创造性；低温度值则更加强调高概率的词语，使生成更加确定和保守
    args.repeat_penalty =
        1.0; // --repeat_penalty 为了避免生成文本中过度重复的片段，可以通过引入一个重复惩罚 来减少模型生成相似内容的概率
             // penalize repeat sequence of tokens (default: 1.0, 1.0 = disabled)

    args.num_threads = 0; // -t, --threads 用于推理的线程数 number of threads for inference

    args.verbose = false; // -v, --verbose 显示详细输出，包括配置/系统/性能信息 display verbose output including
                          // config/system/performance info

    args.prompt = "hi"; // p, --prompt PROMPT  开始生成的提示   prompt to start generation with (default: 你好)

    args.sync = false; // --sync 无流的同步生成 synchronized generation without streaming

    // -s, --system SYSTEM 系统消息，用于设置助手的行为 system message to set the behavior of the assistant
    args.system = "As an intelligent AI assistant, you will provide concise and accurate answers as required.";

    args.max_new_tokens = -1; // ---max_new_tokens 要生成的最大令牌数，忽略提示令牌数 max number of tokens to generate,
                              // ignoring the number of prompt tokens

    ggml_time_init();
    int64_t start_load_us = ggml_time_us();
    chatglm::Pipeline pipeline(args.model_path);

    int64_t end_load_us = ggml_time_us();

    std::cout << "ggml_time_init OK!" << std::endl;

    std::string model_name = pipeline.model->config.model_type_name();

    auto text_streamer = std::make_shared<chatglm::TextStreamer>(std::cout, pipeline.tokenizer.get());
    auto perf_streamer = std::make_shared<chatglm::PerfStreamer>();
    std::vector<std::shared_ptr<chatglm::BaseStreamer>> streamers{perf_streamer};

    if (!args.sync)
    {
        streamers.emplace_back(text_streamer);
    }

    auto streamer = std::make_unique<chatglm::StreamerGroup>(std::move(streamers));

    chatglm::GenerationConfig gen_config(args.max_length, args.max_new_tokens, args.max_context_length, args.temp > 0,
                                         args.top_k, args.top_p, args.temp, args.repeat_penalty, args.num_threads);

    if (args.verbose)
    {
        std::cout << "system info: | "
                  << "AVX = " << ggml_cpu_has_avx() << " | "
                  << "AVX2 = " << ggml_cpu_has_avx2() << " | "
                  << "AVX512 = " << ggml_cpu_has_avx512() << " | "
                  << "AVX512_VBMI = " << ggml_cpu_has_avx512_vbmi() << " | "
                  << "AVX512_VNNI = " << ggml_cpu_has_avx512_vnni() << " | "
                  << "FMA = " << ggml_cpu_has_fma() << " | "
                  << "NEON = " << ggml_cpu_has_neon() << " | "
                  << "ARM_FMA = " << ggml_cpu_has_arm_fma() << " | "
                  << "F16C = " << ggml_cpu_has_f16c() << " | "
                  << "FP16_VA = " << ggml_cpu_has_fp16_va() << " | "
                  << "WASM_SIMD = " << ggml_cpu_has_wasm_simd() << " | "
                  << "BLAS = " << ggml_cpu_has_blas() << " | "
                  << "SSE3 = " << ggml_cpu_has_sse3() << " | "
                  << "VSX = " << ggml_cpu_has_vsx() << " |\n";

        std::cout << "inference config: | "
                  << "max_length = " << args.max_length << " | "
                  << "max_context_length = " << args.max_context_length << " | "
                  << "top_k = " << args.top_k << " | "
                  << "top_p = " << args.top_p << " | "
                  << "temperature = " << args.temp << " | "
                  << "repetition_penalty = " << args.repeat_penalty << " | "
                  << "num_threads = " << args.num_threads << " |\n";

        std::cout << "loaded " << pipeline.model->config.model_type_name() << " model from " << args.model_path
                  << " within: " << (end_load_us - start_load_us) / 1000.f << " ms\n";

        std::cout << std::endl;
    }

    if (args.mode != INFERENCE_MODE_CHAT && args.interactive)
    {
        std::cerr << "interactive demo is only supported for chat mode, falling back to non-interactive one\n";
        args.interactive = false;
    }

    std::vector<chatglm::ChatMessage> system_messages;

    // 设置预设信息和优质回答
    setSystemInfo();

    // 打印系统设定
    if (!args.system.empty())
    {
        std::cout << std::setw(model_name.size()) << std::left << "System"
                  << " > " << args.system << std::endl;
    }

    m_role = chatglm::ChatMessage::ROLE_USER;

    // // 缓存上一次成功回答用于联系上下文
    // std::string input_last = "";
    // std::string output_last = "";

    // // 开始前清理回答缓存
    // input_last.clear();
    // output_last.clear();

    for (auto &paragraph : preprocessing_paragraphs)
    {

    TryAgainTranslation: // 翻译失败再次尝试

        // 设置预设信息和优质回答
        setSystemInfo();

        // // 将上一次成功回答放入历史回答用于联系上下文
        // if (!input_last.empty() && !output_last.empty())
        // {
        //     g_history_messages.emplace_back(chatglm::ChatMessage::ROLE_USER, input_last);
        //     g_history_messages.emplace_back(chatglm::ChatMessage::ROLE_ASSISTANT, output_last);
        // }

        std::string input_str = (std::string)CHAT_PREFIX + paragraph; // 取出待翻译的文本

        std::cout << "*******************************************************************";
        std::cout << "\n"
                  << "human"
                  << " > " << paragraph;

        // AI输出的文本
        std::string output_str;

        // 将未打标文本放入历史消息
        g_history_messages.emplace_back(chatglm::ChatMessage::ROLE_USER, input_str);

        std::cout << "\n"
                  << model_name << " > ";

        // ai返回
        chatglm::ChatMessage output = pipeline.chat(g_history_messages, gen_config, streamer.get());

        if (args.sync)
        {
            print_message(output);
        }

        /********************************************************************/
        std::string output_role = output.role;
        std::string contrast_role = chatglm::ChatMessage::ROLE_ASSISTANT; // 助理 回复类型

        // 判断是否是正常聊天类型
        if (output_role == contrast_role)
        {
            // AI输出的文本
            output_str = output.content;

            // 翻译后的内容比原内容长过多则为失败
            if (output_str.size() > paragraph.size() * 1.8)
            {
                std::cerr << "ERROR output_str long size" << output_str.size() << " > " << paragraph.size() << std::endl;
                goto TryAgainTranslation; // 翻译失败再次尝试
            }

            // // 缓存上一次内容用于联系上下文
            // input_last = input_str;
            // output_last = output_str;

            /***********************************************************************************************************************************/
            // 将AI输出的文本内容保存到文件中
            std::ofstream output_comparison(output_comparison_file_path, std::ios::app);
            if (output_comparison.is_open())
            {
                output_comparison << "\n\n--------------------------------------------------------\n\n";
                output_comparison << paragraph; // 未翻译
                output_comparison << "\n\n--------------------------------------------------------\n\n";
                output_comparison << output_str; // 已翻译
                // 强制将缓冲区内的数据写入硬盘
                output_comparison.flush();
                output_comparison.close();
            }
            else
            {
                std::cerr << "ERROR Unable to open output_comparison_file_path file." << std::endl;
                return output_paragraphs;
            }
            /***********************************************************************************************************************************/

            /***********************************************************************************************************************************/
            // 将AI输出的文本内容保存到文件中
            std::ofstream output_temporary(output_temporary_file_path, std::ios::app);
            if (output_temporary.is_open())
            {
                output_temporary << "\n\n--------------------------------------------------------\n\n";
                output_temporary << output_str; // 已翻译
                output_temporary << "\n\n--------------------------------------------------------\n\n";
                // 强制将缓冲区内的数据写入硬盘
                output_temporary.flush();
                output_temporary.close();
            }
            else
            {
                std::cerr << "ERROR Unable to open output_temporary_file_path file." << std::endl;
                return output_paragraphs;
            }
            /***********************************************************************************************************************************/

            output_paragraphs.push_back(output_str); // 将翻译好的放入输出缓冲区
        }

        // 显示详细输出
        if (args.verbose)
        {
            std::cout << "\n"
                      << perf_streamer->to_string() << "\n\n";
        }

        perf_streamer->reset(); // 重置性能统计
    }

    std::cout << "Bye\n";

    return output_paragraphs;
}

// Write every translated paragraph to the output file, each one wrapped in a
// leading and trailing newline.  The file is created/truncated; a failure to
// open it is reported on stderr and the function returns without writing.
void datasheet_out(std::vector<std::string> output_paragraphs, std::string output_file_path)
{
    std::ofstream output_file(output_file_path);
    if (!output_file.is_open())
    {
        std::cerr << "Unable to open output file." << std::endl;
        return;
    }

    for (const auto &entry : output_paragraphs)
    {
        output_file << '\n' << entry << '\n';
    }

    output_file.close();
    std::cout << "output success" << std::endl;
}

// UTF8 编码格式（xxx 是用来填充二进制 Unicode 码点的）
//
// 1字节	0xxxxxxx
// 2字节	110xxxxx_10xxxxxx
// 3字节	1110xxxx_10xxxxxx_10xxxxxx
// 4字节	11110xxx_10xxxxxx_10xxxxxx_10xxxxxx
// 5字节	111110xx_10xxxxxx_10xxxxxx_10xxxxxx_10xxxxxx
// 6字节	1111110x_10xxxxxx_10xxxxxx_10xxxxxx_10xxxxxx_10xxxxxx
//
// 有效的 Unicode 码点范围为 0-0x10FFFF，最多用到 4 字节 UTF8 编码

// using namespace std;

// 读取 UTF8 文件
std::vector<uint32_t> read_utf8_file(std::string filename)
{
    // 打开文件
    std::ifstream input_file(filename);
    if (!input_file.is_open())
    {
        perror("std::ifstream -> open()");
        exit(1);
    }

    // 跳过 UTF8 BOM（0xEFBBBF）
    if (input_file.get() != 0xEF || input_file.get() != 0xBB || input_file.get() != 0xBF)
    {
        input_file.seekg(0, std::ios::beg);
    }

    unsigned char utf8_chat;           // UTF8 码点，涉及位运算，必须使用无符号数
    uint32_t utf8_wchat;               // Unicode 码点
    std::vector<uint32_t> utf8_vector; // 用于存储转换结果的 Unicode 码点序列

    int utf8_len; // 单个 UTF8 字符的编码长度

    while ((utf8_chat = input_file.get()) && !input_file.eof())
    {
        if (utf8_chat < 0b10000000)
        {
            // 单字节编码
            utf8_wchat = utf8_chat;
        }
        else
        {
            // 多字节编码，获取编码长度
            if (utf8_chat > 0b11110100)
            {
                std::cout << (uint32_t)utf8_chat << std::endl;
                // 超出可用 Unicode 范围 0x10FFFF
                // 11110100_10001111_10111111_10111111
                fprintf(stderr, "Invalid unicode range\n");
                exit(1);
            }
            else if (utf8_chat >= 0b11110000)
            {
                utf8_len = 4;
            }
            else if (utf8_chat >= 0b11100000)
            {
                utf8_len = 3;
            }
            else if (utf8_chat >= 0b11000000)
            {
                utf8_len = 2;
            }
            else
            {
                // 首字节不能小于 0b11000000
                fprintf(stderr, "Invalid utf8 leading code");
                exit(1);
            }
            // 通过左移再右移的方法去掉首字节中的 UTF8 标记
            utf8_chat = utf8_chat << (utf8_len + 1);
            utf8_wchat = utf8_chat >> (utf8_len + 1);

            // 处理后续 UTF8 编码
            while (utf8_len > 1)
            {
                utf8_chat = input_file.get();
                // 如果 input_file 到达 eof，则 utf8_chat 会返回 255，刚好匹配下面的错误检查
                // 后续编码必须是 0b10xxxxxx 格式
                if (utf8_chat >= 0b11000000)
                {
                    fprintf(stderr, "Invalid utf8 tailing code");
                    exit(1);
                }
                utf8_len--;
                utf8_chat = utf8_chat & 0b00111111; // 去掉 UTF8 标记
                utf8_wchat = utf8_wchat << 6;       // 腾出 6 个 bit 的位置
                utf8_wchat += utf8_chat;            // 将去掉了 UTF8 标记的编码合并进来
            }
        }
        utf8_vector.push_back(utf8_wchat); // 存储解解析结果
    }

    input_file.close();

    return utf8_vector;
}

// Decide whether a decoded Unicode code point is kept by the cleaning pass.
// "Visible" here means: printable ASCII, the whitespace controls we want to
// preserve (tab / LF / CR), plus a few punctuation and symbol blocks that
// commonly appear in datasheets.
//
// Handy code points noted by the original author:
//   • 8226, ’ 8217, “ 8220, ” 8221
//
// NOTE(review): CJK ideographs (e.g. U+4E00–U+9FFF, 中文汉字) are NOT kept
// by these ranges — confirm that is intended for the cleaning step.
bool is_visible_char(uint32_t code_point)
{
    // Whitespace controls we keep: tab (0x09), LF (0x0A), CR (0x0D).
    const bool kept_control =
        code_point == 0x09 || code_point == 0x0A || code_point == 0x0D;

    // Printable ASCII: space (0x20) through '~' (0x7E).
    const bool ascii_printable = code_point >= 0x20 && code_point <= 0x7E;

    // General Punctuation block (curly quotes, dashes, bullet, ...).
    const bool general_punct = code_point >= 0x2000 && code_point <= 0x206F;

    // CJK Symbols and Punctuation block.
    const bool cjk_punct = code_point >= 0x3000 && code_point <= 0x303F;

    // Halfwidth and Fullwidth Forms block.
    const bool fullwidth_forms = code_point >= 0xFF00 && code_point <= 0xFFEF;

    return kept_control || ascii_printable || general_punct || cjk_punct ||
           fullwidth_forms;
}

// Encode a single Unicode code point as UTF-8 and write the resulting byte
// sequence (1-4 bytes) to `output`.
//
// Invalid scalar values are not written: code points above U+10FFFF and
// UTF-16 surrogate halves (U+D800..U+DFFF) are reported on stderr and
// skipped, because RFC 3629 forbids encoding either of them in UTF-8.
// (The original code encoded surrogates as 3-byte sequences, producing
// ill-formed UTF-8 output.)
void write_utf8_code_point(std::ostream &output, uint32_t code_point)
{
    if (code_point >= 0xD800 && code_point <= 0xDFFF)
    {
        // Surrogate halves are only meaningful as UTF-16 pairs; emitting
        // them here would create invalid UTF-8.
        fprintf(stderr, "Invalid Unicode code point\n");
        return;
    }

    if (code_point <= 0x7F)
    {
        // 1 byte: 0xxxxxxx
        output.put(static_cast<char>(code_point));
    }
    else if (code_point <= 0x7FF)
    {
        // 2 bytes: 110xxxxx 10xxxxxx
        output.put(static_cast<char>(0xC0 | (code_point >> 6)));
        output.put(static_cast<char>(0x80 | (code_point & 0x3F)));
    }
    else if (code_point <= 0xFFFF)
    {
        // 3 bytes: 1110xxxx 10xxxxxx 10xxxxxx
        output.put(static_cast<char>(0xE0 | (code_point >> 12)));
        output.put(static_cast<char>(0x80 | ((code_point >> 6) & 0x3F)));
        output.put(static_cast<char>(0x80 | (code_point & 0x3F)));
    }
    else if (code_point <= 0x10FFFF)
    {
        // 4 bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
        output.put(static_cast<char>(0xF0 | (code_point >> 18)));
        output.put(static_cast<char>(0x80 | ((code_point >> 12) & 0x3F)));
        output.put(static_cast<char>(0x80 | ((code_point >> 6) & 0x3F)));
        output.put(static_cast<char>(0x80 | (code_point & 0x3F)));
    }
    else
    {
        // Beyond the Unicode range (> U+10FFFF): report and drop.
        fprintf(stderr, "Invalid Unicode code point\n");
    }
}

// Clean the text awaiting translation: decode `input_file` as UTF-8, drop
// every code point rejected by is_visible_char(), and re-encode the
// surviving characters as UTF-8 into `output_file`.
// Exits the process (exit(1)) if the output file cannot be opened.
void datasheet_clean(std::string input_file, std::string output_file)
{
    const std::vector<uint32_t> decoded = read_utf8_file(input_file);

    std::ofstream cleaned(output_file);
    if (!cleaned.is_open())
    {
        perror("std::ofstream -> open()");
        exit(1);
    }

    // Re-encode only the characters considered "visible".
    for (const uint32_t code_point : decoded)
    {
        if (!is_visible_char(code_point))
        {
            continue; // strip invisible / abnormal characters
        }
        write_utf8_code_point(cleaned, code_point);
    }

    cleaned.close();
}

// Input file path (raw text to be translated).
std::string input_file_path = "./datasheet_input/input.txt";

// Path of the file after abnormal characters have been cleaned out.
std::string clean_file_path = "./datasheet_output/clean.txt";

// Output file path: preprocessing result.
std::string output_preprocessing_file_path = "./datasheet_output/output_preprocessing.md";

// Output file path: finished translation.
std::string output_file_path = "./datasheet_output/output.md";

// Output file path: bilingual side-by-side comparison.
std::string output_comparison_file_path = "./datasheet_output/output_comparison.md";

// Output file path: temporary file used during translation.
std::string output_temporary_file_path = "./datasheet_output/output_temporary.md";

// Entry point: cut the input datasheet into paragraphs, translate each
// paragraph, and assemble the translated result into the output file.
int main(int argc, char **argv)
{
    std::vector<std::string> input_paragraphs;         // original paragraphs awaiting translation
    std::vector<std::string> preprocessing_paragraphs; // preprocessed paragraphs
    std::vector<std::string> output_paragraphs;        // translated paragraphs
    std::vector<std::string> cut_paragraphs;           // paragraphs produced by the cutting step

    // Alternative pipeline with cleaning + preprocessing (currently disabled):
    // datasheet_clean(input_file_path, clean_file_path); // strip abnormal characters
    // cut_paragraphs = datasheet_cut(clean_file_path);                                                    // split into paragraphs
    // preprocessing_paragraphs = preprocessing_datasheet(cut_paragraphs, output_preprocessing_file_path); // preprocess

    // Active pipeline: cut straight from the raw input, then translate.
    cut_paragraphs = datasheet_cut(input_file_path);
    output_paragraphs = fuck_datasheet(cut_paragraphs, output_comparison_file_path, output_temporary_file_path);

    // Write the assembled translation to the final output file.
    datasheet_out(output_paragraphs, output_file_path);

    return 0;
}
