-- Custom Chinese-language system prompt that fully replaces CodeCompanion's
-- built-in one (consumed by `opts.system_prompt` in the setup call below).
-- The text is sent to the LLM verbatim, so it must not be reformatted or
-- translated.
-- NOTE(review): the `%s` near the end ("...必须用 %s 格式") is a
-- string.format placeholder inherited from the upstream prompt template; it
-- is only replaced if the consumer passes this string through string.format
-- with the reply language — confirm it is not returned verbatim.
local codecompanion_system_prompt = [[
你是一个名为“CodeCompanion”的 AI 编程助手。你目前连接到了用户机器上的 Neovim 文本编辑器中。
您的核心任务包括：
- 回答一般的编程问题。
- 解释 Neovim 缓冲区中的代码是如何工作的。
- 审查 Neovim 缓冲区中选定的代码。
- 为选定的代码生成单元测试。
- 为选定代码中的问题提出修复建议。
- 为新的工作区搭建代码框架。
- 根据用户的查询查找相关代码。
- 为测试失败提出修复建议。
- 回答有关 Neovim 的问题。
- 运行工具。

您必须：
- 仔细且严格遵循用户的各项要求。
- 回答简短且客观，尤其是当用户提供的信息超出您的任务范围时。
- 减少其他叙述性内容。
- 在回答中使用 Markdown 格式。
- 在 Markdown 代码块的开头注明编程语言名称。
- 避免在代码块中包含行号。
- 避免将整个回复用三个反引号包围。
- 只返回与当前任务相关的代码。您可能不需要返回用户分享的所有代码。
- 在回复中使用实际的换行符来开始新行，而不是使用 '\n'。
- 仅在需要显示字面意义上的反斜杠后跟字符 'n' 时使用 '\n'。
- 所有非代码回复都必须用 %s 格式。

当被赋予一项任务时：1. 请逐步思考，并用伪代码详细描述您要构建的内容的计划，除非另有要求。2. 将代码输出到一个单独的代码块中，注意只返回相关代码。3. 您应当始终为用户的下一轮对话生成简短且与对话内容相关的建议。4. 您在每次对话轮次中只能给出一个回复。
]]

-- User prompt backing the custom "stock" prompt-library entry below: a
-- Chinese-language trading-strategy "mirror advisor" persona plus a concrete
-- discussion topic. The text is sent to the LLM verbatim (with
-- `visible = false`, so it is not rendered in the chat buffer) and must not
-- be reformatted or translated.
local codecompanion_stock_prompt = [[
【角色设定】
你是我个人专属的“交易策略镜像顾问”。我们已共同工作多年，你对我的交易体系、哲学背景和心智模式了如指掌。我们的关系是平等的战略合伙人，你的核心任务是**审视、挑战和完善我的系统**，而非提供基础教育。

【核心指令】
1.  **禁止教学**：不得以“老师”口吻对我进行任何基础概念科普或步骤指导。
2.  **禁止给答案**：不得直接给出“你应该怎么做”的结论。必须通过苏格拉底式提问，引导我自行推导出答案。
3.  **基于共识**：所有讨论必须严格基于下文提供的《我的交易哲学身份文件》，不得偏离此框架。

【我的交易哲学身份文件】
**核心世界观：**
- 市场是多重力量的混沌博弈，非单主力控盘。摒弃“洗盘、诱多”等旧词汇。
- **战场：专注高流动性、高标股。** 流动性是首要过滤器。
- 价格是共识的化身，我分析“围绕价格线的力量”。
- 体系是 **“信号（共识转折）+过滤器（力量验证）”**。关键信号：日线缩量后的30分钟/小时级放量。
- 追求“控制回撤的模糊正确”，不信任何指标圣杯。

**个人背景与心法：**
- 前程序员，用Python系统化交易，重把控而非全自动。
- 有哲学背景，视市场为哲学实践道场。
- 当前核心矛盾：平衡“避免时间成本”与“抓住主升浪”。
- 震荡市节奏极快（隔日套利），趋势市中会调整。

【本次对话的具体议题】
“我今天按系统入场后，股价冲高回落收长上影，但成交量并未异常放大，也未破我的关键止损位。根据我们的体系，你认为这是‘共识脆弱点暴露’还是‘上涨中继的盘中洗盘’？我应该如何设计一个过滤器来更清晰地界定这两种情况？”
]]
require("codecompanion").setup({
    prompt_library = {
        -- NOTE(review): template entry with no `prompts` table; depending on
        -- the CodeCompanion version this may show up as an empty item in the
        -- action palette — fill it in or remove it before relying on it.
        ["Your_New_Prompt"] = {
            strategy = "chat",
            description = "Your Special New Prompt",
            opts = {
                ignore_system_prompt = true,
            },
            -- Your prompts here
        },
        -- Chat pre-seeded with the trading-advisor persona defined above.
        ["stock"] = {
            strategy = "chat",
            description = "分析股票市场",
            opts = {
                short_name = "stock",
                -- The persona prompt fully replaces the system prompt.
                ignore_system_prompt = true,
            },
            prompts = {
                {
                    role = "user",
                    content = codecompanion_stock_prompt,
                    opts = {
                        -- Don't render the long persona text in the chat buffer.
                        visible = false,
                    },
                }
            },
        },
        ["Docusaurus"] = {
            strategy = "chat",
            description = "Write documentation for me",
            opts = {
                index = 11,
                is_slash_cmd = false,
                auto_submit = false,
                short_name = "docs",
            },
            -- Files shared with the LLM as context for the rewrite.
            references = {
                {
                    type = "file",
                    path = {
                        "doc/.vitepress/config.mjs",
                        "lua/codecompanion/config.lua",
                        "README.md",
                    },
                },
            },
            prompts = {
                {
                    role = "user",
                    content =
                    [[I'm rewriting the documentation for my plugin CodeCompanion.nvim, as I'm moving to a vitepress website. Can you help me rewrite it?

    I'm sharing my vitepress config file so you have the context of how the documentation website is structured in the `sidebar` section of that file.

    I'm also sharing my `config.lua` file which I'm mapping to the `configuration` section of the sidebar.
    ]],
                },
            },
        },
    },
    display = {
        inline = {
            layout = "vertical", -- vertical|horizontal|buffer
        },
        chat = {
            icons = {
                pinned_buffer = " ",
                watched_buffer = "👀 ",
            },
            window = {
                layout = "buffer", -- float|vertical|horizontal|buffer
            },
            intro_message = "Welcome to CodeCompanion ✨! Press ? for options",
            show_header_separator = false, -- Show header separators in the chat buffer? Set this to false if you're using an external markdown formatting plugin
            separator = "─", -- The separator between the different messages in the chat buffer
            show_references = true, -- Show references (from slash commands and variables) in the chat buffer?
            show_settings = false, -- Show LLM settings at the top of the chat buffer?
            show_token_count = true, -- Show the token count for each response?
            start_in_insert_mode = false, -- Open the chat buffer in insert mode?
        },
        action_palette = {
            width = 95,
            height = 10,
            prompt = "Prompt ",                     -- Prompt used for interactive LLM calls
            provider = "telescope",                 -- default|telescope|mini_pick
            opts = {
                show_default_actions = true,        -- Show the default actions in the action palette?
                show_default_prompt_library = true, -- Show the default prompt library in the action palette?
            },
        },
    },
    -- Both strategies use the "deepseek" adapter defined below.
    strategies = {
        chat = { adapter = "deepseek" },
        inline = { adapter = "deepseek" },
    },
    adapters = {
        http = {
            gemini = function()
                return require("codecompanion.adapters").extend("gemini", {
                    env = {
                        api_key = os.getenv("GEMINI_API_KEY"),
                        -- NOTE(review): a `model` key inside `env` is unusual;
                        -- the gemini adapter normally selects the model via
                        -- `schema.model.default` — confirm this key is read.
                        model = 'gemini-1.5-flash'
                    },
                })
            end,
            deepseek = function()
                return require("codecompanion.adapters").extend("deepseek", {
                    env = {
                        -- url = "https://api.deepseek.com",
                        api_key = os.getenv("DEEPSEEK_API_KEY"),
                    },
                })
            end,
        },
    },
    opts = {
        language = "中文",
        log_level = "ERROR",
        -- The custom system prompt contains one `%s` placeholder (the reply
        -- language, inherited from the upstream prompt template). Substitute
        -- the configured language here instead of returning the template
        -- verbatim, which previously leaked a literal "%s" to the LLM.
        system_prompt = function(opts)
            return string.format(
                codecompanion_system_prompt,
                (opts and opts.language) or "中文"
            )
        end,
    },
    extensions = {
        history = {
            enabled = true,
            opts = {
                -- Keymap to open history from chat buffer (default: gh)
                keymap = "gh",
                -- Keymap to save the current chat manually (when auto_save is disabled)
                save_chat_keymap = "sc",
                -- Save all chats by default (disable to save only manually using 'sc')
                auto_save = true,
                -- Number of days after which chats are automatically deleted (0 to disable)
                expiration_days = 0,
                -- Picker interface (auto resolved to a valid picker)
                picker = "telescope", --- ("telescope", "snacks", "fzf-lua", or "default")
                ---Optional filter function to control which chats are shown when browsing
                chat_filter = nil, -- function(chat_data) return boolean end
                -- Customize picker keymaps (optional)
                picker_keymaps = {
                    rename = { n = "r", i = "<M-r>" },
                    delete = { n = "d", i = "<M-d>" },
                    duplicate = { n = "<C-y>", i = "<C-y>" },
                },
                ---Automatically generate titles for new chats
                auto_generate_title = true,
                title_generation_opts = {
                    ---Adapter for generating titles (defaults to current chat adapter)
                    adapter = nil, -- "copilot"
                    ---Model for generating titles (defaults to current chat model)
                    model = nil, -- "gpt-4o"
                    ---Number of user prompts after which to refresh the title (0 to disable)
                    refresh_every_n_prompts = 0, -- e.g., 3 to refresh after every 3rd user prompt
                    ---Maximum number of times to refresh the title (default: 3)
                    max_refreshes = 3,
                    format_title = function(original_title)
                        -- this can be a custom function that applies some custom
                        -- formatting to the title.
                        return original_title
                    end
                },
                ---On exiting and entering neovim, loads the last chat on opening chat
                continue_last_chat = false,
                ---When chat is cleared with `gx` delete the chat from history
                delete_on_clearing_chat = false,
                ---Directory path to save the chats
                dir_to_save = vim.fn.stdpath("data") .. "/codecompanion-history",
                ---Enable detailed logging for history extension
                enable_logging = false,

                -- Summary system
                summary = {
                    -- Keymap to generate summary for current chat (default: "gcs")
                    create_summary_keymap = "gcs",
                    -- Keymap to browse summaries (default: "gbs")
                    browse_summaries_keymap = "gbs",

                    generation_opts = {
                        adapter = nil, -- defaults to current chat adapter
                        model = nil, -- defaults to current chat model
                        context_size = 90000, -- max tokens that the model supports
                        include_references = true, -- include slash command content
                        include_tool_outputs = true, -- include tool execution results
                        system_prompt = nil, -- custom system prompt (string or function)
                        format_summary = nil, -- custom function to format generated summary e.g to remove <think/> tags from summary
                    },
                },

                -- Memory system (requires VectorCode CLI)
                memory = {
                    -- Automatically index summaries when they are generated
                    auto_create_memories_on_summary_generation = true,
                    -- Path to the VectorCode executable
                    vectorcode_exe = "vectorcode",
                    -- Tool configuration
                    tool_opts = {
                        -- Default number of memories to retrieve
                        default_num = 10
                    },
                    -- Enable notifications for indexing progress
                    notify = true,
                    -- Index all existing memories on startup
                    -- (requires VectorCode 0.6.12+ for efficient incremental indexing)
                    index_on_startup = false,
                },
            }
        }
    }
})
