import { Message, ChatResponse } from "../types/chat";
import { ConfigService } from "./config_service";
import { proxyFetch } from "../../util/proxy";
import { Tool } from "../../ui/store/useTools";

// Module-level constants
const MAX_RETRIES = 3;
const TIMEOUT = 30000; // 30-second timeout — NOTE(review): not referenced anywhere in this file; confirm intended use (e.g. request abort) or remove

/**
 * 使用流式传输向Ollama API发送消息
 * @param onStream 处理流式响应的回调函数
 * @param arg 对话上下文参数（messages 与 tools）
 * @returns Promise<ChatResponse> 流结束后聚合的最终响应
 */
export async function sendMessage(
    onStream: (chunk: ChatResponse) => void,
    arg: { messages: Message[]; tools: Tool[] } = {
        messages: [],
        tools: [],
    },
): Promise<ChatResponse> {
    const { messages, tools } = arg;
    try {
        // Validate configuration before touching the network.
        const config = ConfigService.getInstance().getConfig();
        if (!config?.baseUrl) {
            throw new Error("AI server URL is not configured");
        }
        if (!config.selectedModel) {
            const errorMessage =
                '尚未选择模型，请在右上角点击"配置"，选择"模型"后再试';
            throw new Error(errorMessage);
        }

        // Build the Ollama /api/chat request (streaming NDJSON response).
        const apiUrl = new URL("/api/chat", config.baseUrl).toString();
        const requestBody = {
            model: config.selectedModel,
            messages,
            tools,
            stream: true,
            options: {
                temperature: 0.7,
                num_ctx: 2048,
            },
            keep_alive: "5m",
        };

        const response = await proxyFetch(apiUrl, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
            },
            body: JSON.stringify(requestBody),
        });

        // Surface server-provided error details when available.
        if (!response.ok) {
            const errorData = await response.json().catch(() => ({}));
            throw new Error(errorData.error || "Network response was not ok");
        }

        // Process the streaming response body.
        if (response.body) {
            const reader = response.body.getReader();
            const decoder = new TextDecoder();
            let parseFailures = 0; // malformed lines tolerated so far
            let content = ""; // accumulated assistant message text
            let buffer = ""; // partial NDJSON line carried across reads
            try {
                // Parse one complete NDJSON line: forward the chunk to the
                // caller and accumulate the assistant text. Tolerates up to
                // MAX_RETRIES malformed lines before rethrowing.
                const consumeLine = (line: string): void => {
                    if (!line.trim()) {
                        return;
                    }
                    try {
                        const data = JSON.parse(line);
                        // Ollama chat chunks carry text under message.content.
                        content += data.message?.content ?? "";
                        onStream(data);
                    } catch (error) {
                        console.error("Error parsing stream chunk:", error);
                        if (parseFailures < MAX_RETRIES) {
                            parseFailures++;
                            return; // skip this malformed line
                        }
                        throw error;
                    }
                };

                while (true) {
                    const { done, value } = await reader.read();
                    if (done) {
                        break;
                    }
                    // stream:true keeps multi-byte UTF-8 sequences split
                    // across reads from decoding as replacement characters.
                    buffer += decoder.decode(value, { stream: true });
                    // A single read may contain several complete JSON lines
                    // plus one incomplete tail; keep the tail for next read.
                    const lines = buffer.split("\n");
                    buffer = lines.pop() ?? "";
                    for (const line of lines) {
                        consumeLine(line);
                    }
                }
                // Flush any complete trailing line left in the buffer.
                buffer += decoder.decode();
                consumeLine(buffer);

                // Final aggregated response for the whole stream.
                return {
                    id: "",
                    role: "assistant",
                    content,
                    done: true,
                };
            } catch (error) {
                const message =
                    error instanceof Error ? error.message : "Unknown error";
                throw new Error(`Stream processing error: ${message}`);
            } finally {
                reader.releaseLock();
            }
        }

        throw new Error("No response body received");
    } catch (error) {
        // Log with context, then let the caller decide how to handle it.
        console.error("Error in sendMessage:", error);
        throw error;
    }
}
